Commit 79d08f89 authored by Ming Lei, committed by Jens Axboe

block: fix .bi_size overflow

'bio->bi_iter.bi_size' is 'unsigned int', which can hold at most 4G - 1
bytes.

Before 07173c3e ("block: enable multipage bvecs"), one bio could
include only a limited number of pages, usually at most 256, so the fs
bio size rarely exceeded 1M bytes.

Since multi-page bvecs are supported, more than 1M pages can in theory
be added to one fs bio, especially in the case of hugepages or big
writeback with many dirty pages. There is then a chance that .bi_size
overflows.

Fix this issue by using bio_full() to check whether the segment being
added would overflow .bi_size.
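
For illustration only, a minimal userspace sketch (not kernel code; the
helper name would_overflow is made up) of why the check is written as
"size > UINT_MAX - len" rather than "size + len > UINT_MAX": in unsigned
arithmetic the addition itself wraps, so the latter form can never be true.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the overflow test added to bio_full(). */
static bool would_overflow(unsigned int size, unsigned int len)
{
	/* Rearranged so the comparison itself cannot wrap around. */
	return size > UINT_MAX - len;
}

int main(void)
{
	/* Near the 4G - 1 limit: 8K more bytes would wrap a 32-bit size. */
	printf("%d\n", would_overflow(UINT_MAX - 4096, 8192));	/* 1 */
	/* A typical 1M bio has plenty of headroom. */
	printf("%d\n", would_overflow(1024 * 1024, 4096));	/* 0 */
	return 0;
}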

Cc: Liu Yiding <liuyd.fnst@cn.fujitsu.com>
Cc: kernel test robot <rong.a.chen@intel.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: linux-xfs@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: 07173c3e ("block: enable multipage bvecs")
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5be1f9d8

--- a/block/bio.c
+++ b/block/bio.c
@@ -723,7 +723,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		}
 	}
 
-	if (bio_full(bio))
+	if (bio_full(bio, len))
 		return 0;
 
 	if (bio->bi_vcnt >= queue_max_segments(q))
@@ -797,7 +797,7 @@ void __bio_add_page(struct bio *bio, struct page *page,
 	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
 
 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
-	WARN_ON_ONCE(bio_full(bio));
+	WARN_ON_ONCE(bio_full(bio, len));
 
 	bv->bv_page = page;
 	bv->bv_offset = off;
@@ -824,7 +824,7 @@ int bio_add_page(struct bio *bio, struct page *page,
 	bool same_page = false;
 
 	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
-		if (bio_full(bio))
+		if (bio_full(bio, len))
 			return 0;
 		__bio_add_page(bio, page, len, offset);
 	}
@@ -909,7 +909,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 			if (same_page)
 				put_page(page);
 		} else {
-			if (WARN_ON_ONCE(bio_full(bio)))
+			if (WARN_ON_ONCE(bio_full(bio, len)))
 				return -EINVAL;
 			__bio_add_page(bio, page, len, offset);
 		}
@@ -953,7 +953,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 			ret = __bio_iov_bvec_add_pages(bio, iter);
 		else
 			ret = __bio_iov_iter_get_pages(bio, iter);
-	} while (!ret && iov_iter_count(iter) && !bio_full(bio));
+	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
 
 	if (is_bvec)
 		bio_set_flag(bio, BIO_NO_PAGE_REF);

--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -333,7 +333,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	if (iop)
 		atomic_inc(&iop->read_count);
 
-	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
+	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -782,7 +782,7 @@ xfs_add_to_ioend(
 		atomic_inc(&iop->write_count);
 
 	if (!merged) {
-		if (bio_full(wpc->ioend->io_bio))
+		if (bio_full(wpc->ioend->io_bio, len))
 			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
 		bio_add_page(wpc->ioend->io_bio, page, len, poff);
 	}

--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }
 
-static inline bool bio_full(struct bio *bio)
+/**
+ * bio_full - check if the bio is full
+ * @bio:	bio to check
+ * @len:	length of one segment to be added
+ *
+ * Return true if @bio is full and one segment with @len bytes can't be
+ * added to the bio, otherwise return false
+ */
+static inline bool bio_full(struct bio *bio, unsigned len)
 {
-	return bio->bi_vcnt >= bio->bi_max_vecs;
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
+		return true;
+
+	if (bio->bi_iter.bi_size > UINT_MAX - len)
+		return true;
+
+	return false;
 }
 
 static inline bool bio_next_segment(const struct bio *bio,
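
As a usage illustration, here is a hedged, self-contained sketch with a
mock type (not the real kernel structs) showing that the patched
bio_full() rejects a segment for either of two reasons: no bvec slot is
left, or the 32-bit byte count would wrap.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Mock of only the fields bio_full() reads; field names follow the kernel's. */
struct mock_bio {
	unsigned short bi_vcnt;		/* bvec slots in use */
	unsigned short bi_max_vecs;	/* bvec slots available */
	unsigned int bi_size;		/* stands in for bi_iter.bi_size */
};

/* Same two-condition logic as the patched bio_full(). */
static bool mock_bio_full(const struct mock_bio *bio, unsigned int len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;		/* no bvec slot left */
	if (bio->bi_size > UINT_MAX - len)
		return true;		/* .bi_size would overflow */
	return false;
}

int main(void)
{
	/* Slots remain, but 4K more bytes would overflow the 32-bit size. */
	struct mock_bio bio = { .bi_vcnt = 1, .bi_max_vecs = 256,
				.bi_size = UINT_MAX - 2048 };

	printf("%d\n", mock_bio_full(&bio, 4096));	/* prints 1 */
	return 0;
}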