Commit 7589ad67 authored by Christoph Hellwig, committed by Jens Axboe

block: remove __blk_rq_map_user_iov

Just duplicate a small amount of code in the low-level map-into-the-bio and
copy-to-the-bio routines, leading to code that is much easier to follow and
maintain, and to better shared error handling.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7b63c052
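
In short, the two low-level helpers change signature as summarized below (a condensed sketch drawn from the hunks that follow; the full bodies are in the diff). Each helper now takes the request itself, sets the bio's operation from req_op(rq), appends the possibly-bounced bio via blk_rq_append_bio(), and returns 0 or a negative errno, which is what makes the __blk_rq_map_user_iov() wrapper redundant:

/* before: build a bio against a queue, return it or an ERR_PTR */
static struct bio *bio_copy_user_iov(struct request_queue *q,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask);
static struct bio *bio_map_user_iov(struct request_queue *q,
                struct iov_iter *iter, gfp_t gfp_mask);

/* after: build the bio, append it to the request, return an errno */
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
                struct iov_iter *iter, gfp_t gfp_mask);
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                gfp_t gfp_mask);
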
@@ -127,24 +127,12 @@ static int bio_uncopy_user(struct bio *bio)
         return ret;
 }
 
-/**
- * bio_copy_user_iov - copy user data to bio
- * @q: destination block queue
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @iter: iovec iterator
- * @gfp_mask: memory allocation flags
- *
- * Prepares and returns a bio for indirect user io, bouncing data
- * to/from kernel pages as necessary. Must be paired with
- * call bio_uncopy_user() on io completion.
- */
-static struct bio *bio_copy_user_iov(struct request_queue *q,
-                struct rq_map_data *map_data, struct iov_iter *iter,
-                gfp_t gfp_mask)
+static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
+                struct iov_iter *iter, gfp_t gfp_mask)
 {
         struct bio_map_data *bmd;
         struct page *page;
-        struct bio *bio;
+        struct bio *bio, *bounce_bio;
         int i = 0, ret;
         int nr_pages;
         unsigned int len = iter->count;
@@ -152,7 +140,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 
         bmd = bio_alloc_map_data(iter, gfp_mask);
         if (!bmd)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
 
         /*
          * We need to do a deep copy of the iov_iter including the iovecs.
@@ -169,8 +157,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
         bio = bio_kmalloc(gfp_mask, nr_pages);
         if (!bio)
                 goto out_bmd;
-
-        ret = 0;
+        bio->bi_opf |= req_op(rq);
 
         if (map_data) {
                 nr_pages = 1 << map_data->page_order;
@@ -187,7 +174,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
                 if (map_data) {
                         if (i == map_data->nr_entries * nr_pages) {
                                 ret = -ENOMEM;
-                                break;
+                                goto cleanup;
                         }
 
                         page = map_data->pages[i / nr_pages];
@@ -195,14 +182,14 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
 
                         i++;
                 } else {
-                        page = alloc_page(q->bounce_gfp | gfp_mask);
+                        page = alloc_page(rq->q->bounce_gfp | gfp_mask);
                         if (!page) {
                                 ret = -ENOMEM;
-                                break;
+                                goto cleanup;
                         }
                 }
 
-                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+                if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
                         if (!map_data)
                                 __free_page(page);
                         break;
@@ -212,9 +199,6 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
                 offset = 0;
         }
 
-        if (ret)
-                goto cleanup;
-
         if (map_data)
                 map_data->offset += bio->bi_iter.bi_size;
 
@@ -236,39 +220,42 @@ static struct bio *bio_copy_user_iov(struct request_queue *q,
         bio->bi_private = bmd;
         if (map_data && map_data->null_mapped)
                 bmd->is_null_mapped = true;
-        return bio;
+
+        bounce_bio = bio;
+        ret = blk_rq_append_bio(rq, &bounce_bio);
+        if (ret)
+                goto cleanup;
+
+        /*
+         * We link the bounce buffer in and could have to traverse it later, so
+         * we have to get a ref to prevent it from being freed
+         */
+        bio_get(bounce_bio);
+        return 0;
 cleanup:
         if (!map_data)
                 bio_free_pages(bio);
         bio_put(bio);
 out_bmd:
         kfree(bmd);
-        return ERR_PTR(ret);
+        return ret;
 }
 
-/**
- * bio_map_user_iov - map user iovec into bio
- * @q: the struct request_queue for the bio
- * @iter: iovec iterator
- * @gfp_mask: memory allocation flags
- *
- * Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
-static struct bio *bio_map_user_iov(struct request_queue *q,
-                struct iov_iter *iter, gfp_t gfp_mask)
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+                gfp_t gfp_mask)
 {
-        unsigned int max_sectors = queue_max_hw_sectors(q);
-        int j;
-        struct bio *bio;
+        unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+        struct bio *bio, *bounce_bio;
         int ret;
+        int j;
 
         if (!iov_iter_count(iter))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
 
         bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
         if (!bio)
-                return ERR_PTR(-ENOMEM);
+                return -ENOMEM;
+        bio->bi_opf |= req_op(rq);
 
         while (iov_iter_count(iter)) {
                 struct page **pages;
@@ -284,7 +271,7 @@ static struct bio *bio_map_user_iov(struct request_queue *q,
 
                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
 
-                if (unlikely(offs & queue_dma_alignment(q))) {
+                if (unlikely(offs & queue_dma_alignment(rq->q))) {
                         ret = -EINVAL;
                         j = 0;
                 } else {
@@ -296,7 +283,7 @@ static struct bio *bio_map_user_iov(struct request_queue *q,
                                 if (n > bytes)
                                         n = bytes;
 
-                                if (!bio_add_hw_page(q, bio, page, n, offs,
+                                if (!bio_add_hw_page(rq->q, bio, page, n, offs,
                                                 max_sectors, &same_page)) {
                                         if (same_page)
                                                 put_page(page);
@@ -323,18 +310,30 @@ static struct bio *bio_map_user_iov(struct request_queue *q,
         bio_set_flag(bio, BIO_USER_MAPPED);
 
         /*
-         * subtle -- if bio_map_user_iov() ended up bouncing a bio,
-         * it would normally disappear when its bi_end_io is run.
-         * however, we need it for the unmap, so grab an extra
-         * reference to it
+         * Subtle: if we end up needing to bounce a bio, it would normally
+         * disappear when its bi_end_io is run. However, we need the original
+         * bio for the unmap, so grab an extra reference to it
          */
         bio_get(bio);
-        return bio;
 
+        bounce_bio = bio;
+        ret = blk_rq_append_bio(rq, &bounce_bio);
+        if (ret)
+                goto out_put_orig;
+
+        /*
+         * We link the bounce buffer in and could have to traverse it
+         * later, so we have to get a ref to prevent it from being freed
+         */
+        bio_get(bounce_bio);
+        return 0;
+
+out_put_orig:
+        bio_put(bio);
 out_unmap:
         bio_release_pages(bio, false);
         bio_put(bio);
-        return ERR_PTR(ret);
+        return ret;
 }
 
 /**
@@ -558,44 +557,6 @@ int blk_rq_append_bio(struct request *rq, struct bio **bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
-static int __blk_rq_map_user_iov(struct request *rq,
-                struct rq_map_data *map_data, struct iov_iter *iter,
-                gfp_t gfp_mask, bool copy)
-{
-        struct request_queue *q = rq->q;
-        struct bio *bio, *orig_bio;
-        int ret;
-
-        if (copy)
-                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
-        else
-                bio = bio_map_user_iov(q, iter, gfp_mask);
-
-        if (IS_ERR(bio))
-                return PTR_ERR(bio);
-
-        bio->bi_opf &= ~REQ_OP_MASK;
-        bio->bi_opf |= req_op(rq);
-
-        orig_bio = bio;
-
-        /*
-         * We link the bounce buffer in and could have to traverse it
-         * later so we have to get a ref to prevent it from being freed
-         */
-        ret = blk_rq_append_bio(rq, &bio);
-        if (ret) {
-                if (copy)
-                        bio_uncopy_user(orig_bio);
-                else
-                        bio_unmap_user(orig_bio);
-                return ret;
-        }
-        bio_get(bio);
-        return 0;
-}
-
 /**
  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q: request queue where request should be inserted
@@ -639,7 +600,10 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
         i = *iter;
         do {
-                ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
+                if (copy)
+                        ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
+                else
+                        ret = bio_map_user_iov(rq, &i, gfp_mask);
                 if (ret)
                         goto unmap_rq;
                 if (!bio)