Commit d74c6d51 authored by Kent Overstreet

block: Add bio_for_each_segment_all()

__bio_for_each_segment() iterates bvecs from the specified index
instead of bio->bi_idx.  Currently, its only usage is to walk all the
bvecs after the bio has been advanced, by specifying an index of 0.

For immutable bvecs, these two uses need to be split apart;
bio_for_each_segment() is going to have a different implementation.
This will also help document the intent of code that's using it -
bio_for_each_segment_all() is only legal to use for code that owns the
bio.
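
To illustrate the ownership rule (a sketch written against the macros this
patch adds; it is not part of the patch, and both helper functions are
hypothetical):

    #include <linux/bio.h>
    #include <linux/gfp.h>

    /*
     * A driver handed a bio it does not own may only walk the pending
     * segments, starting from bi_idx:
     */
    static unsigned int pending_bytes(struct bio *bio)
    {
            struct bio_vec *bvec;
            unsigned int bytes = 0;
            int i;

            bio_for_each_segment(bvec, bio, i)      /* starts at bio->bi_idx */
                    bytes += bvec->bv_len;
            return bytes;
    }

    /*
     * Code that allocated the bio owns every bvec and may walk them all,
     * e.g. to free pages it allocated itself (cf. the bio_copy_user_iov
     * cleanup path below):
     */
    static void free_owned_pages(struct bio *bio)
    {
            struct bio_vec *bvec;
            int i;

            bio_for_each_segment_all(bvec, bio, i)  /* starts at index 0 */
                    __free_page(bvec->bv_page);
    }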
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: Neil Brown <neilb@suse.de>
CC: Boaz Harrosh <bharrosh@panasas.com>
parent 6bc454d1
drivers/block/rbd.c
@@ -952,7 +952,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 	/* Find first affected segment... */
 	resid = offset;
-	__bio_for_each_segment(bv, bio_src, idx, 0) {
+	bio_for_each_segment(bv, bio_src, idx) {
 		if (resid < bv->bv_len)
 			break;
 		resid -= bv->bv_len;
...
drivers/md/raid1.c
@@ -1291,7 +1291,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 				 * know the original bi_idx, so we just free
 				 * them all
 				 */
-				__bio_for_each_segment(bvec, mbio, j, 0)
+				bio_for_each_segment_all(bvec, mbio, j)
 					bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
...
fs/bio.c
@@ -961,7 +961,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 	int iov_idx = 0;
 	unsigned int iov_off = 0;

-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *bv_addr = page_address(bvec->bv_page);
 		unsigned int bv_len = iovecs[i].bv_len;
fs/bio.c
@@ -1143,7 +1143,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	return bio;
 cleanup:
 	if (!map_data)
-		bio_for_each_segment(bvec, bio, i)
+		bio_for_each_segment_all(bvec, bio, i)
 			__free_page(bvec->bv_page);

 	bio_put(bio);
fs/bio.c
@@ -1357,7 +1357,7 @@ static void __bio_unmap_user(struct bio *bio)
 	/*
 	 * make sure we dirty pages we wrote to
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
fs/bio.c
@@ -1463,7 +1463,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 	int i;
 	char *p = bmd->sgvecs[0].iov_base;

-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *addr = page_address(bvec->bv_page);
 		int len = bmd->iovecs[i].bv_len;
fs/bio.c
@@ -1503,7 +1503,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	if (!reading) {
 		void *p = data;

-		bio_for_each_segment(bvec, bio, i) {
+		bio_for_each_segment_all(bvec, bio, i) {
 			char *addr = page_address(bvec->bv_page);

 			memcpy(addr, p, bvec->bv_len);
fs/bio.c
@@ -1789,7 +1789,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
 	if (index >= bio->bi_idx)
 		index = bio->bi_vcnt - 1;

-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		if (i == index) {
 			if (offset > bv->bv_offset)
 				sectors += (offset - bv->bv_offset) / sector_sz;
...
fs/exofs/ore.c
@@ -401,7 +401,7 @@ static void _clear_bio(struct bio *bio)
 	struct bio_vec *bv;
 	unsigned i;

-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		unsigned this_count = bv->bv_len;

 		if (likely(PAGE_SIZE == this_count))
...
fs/exofs/ore_raid.c
@@ -432,7 +432,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
 		if (!bio)
 			continue;

-		__bio_for_each_segment(bv, bio, i, 0) {
+		bio_for_each_segment_all(bv, bio, i) {
 			struct page *page = bv->bv_page;

 			SetPageUptodate(page);
...
include/linux/bio.h
@@ -137,16 +137,27 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define bio_io_error(bio) bio_endio((bio), -EIO)

 /*
- * drivers should not use the __ version unless they _really_ want to
- * run through the entire bio and not just pending pieces
+ * drivers should not use the __ version unless they _really_ know what
+ * they're doing
  */
 #define __bio_for_each_segment(bvl, bio, i, start_idx)			\
 	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
 	     i < (bio)->bi_vcnt;					\
 	     bvl++, i++)

+/*
+ * drivers should _never_ use the all version - the bio may have been split
+ * before it got to the driver and the driver won't own all of it
+ */
+#define bio_for_each_segment_all(bvl, bio, i)				\
+	for (i = 0;							\
+	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
+	     i++)
+
 #define bio_for_each_segment(bvl, bio, i)				\
-	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
+	for (i = (bio)->bi_idx;						\
+	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
+	     i++)

 /*
  * get a reference to a bio, so it won't disappear. the intended use is
...
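
A note on the shape of the new iterators (a hand expansion for clarity; not
part of the patch): the comma expression in the loop condition refreshes bvl
before every bound check, so the body always sees bvl == &bio->bi_io_vec[i].
After termination bvl points one past the last bvec, but it is never
dereferenced there.

    /*
     * bio_for_each_segment_all(bvl, bio, i) written out by hand;
     * bio_iovec_idx(bio, i) is &(bio)->bi_io_vec[(i)] in this tree.
     */
    for (i = 0;
         bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;
         i++) {
            /* loop body: bvl is the i-th bvec */
    }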
mm/bounce.c
@@ -134,7 +134,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	/*
 	 * free up bounce indirect pages used
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		org_vec = bio_orig->bi_io_vec + i;

 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
...