Commit ed9832bc authored by Kundan Kumar, committed by Jens Axboe

block: introduce folio awareness and add a bigger size from folio

Add a bigger size from the folio to the bio and skip merge processing for the individual pages.

Fetch the offset of the page within its folio. Depending on the folio size and folio_offset, fetch a larger length. This length may span multiple contiguous pages if the folio is multi-order.

Using this length, calculate the number of pages that will be added to the bio and increment the loop counter to skip those pages (see the worked sketch below).

This avoids the overhead of merging pages that belong to the same large-order folio.

Also folio-ize the functions bio_iov_add_page() and bio_iov_add_zone_append_page().
Signed-off-by: Kundan Kumar <kundan.kumar@samsung.com>
Tested-by: Luis Chamberlain <mcgrof@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20240911064935.5630-3-kundan.kumar@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7de98954
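
To make the per-iteration arithmetic described above concrete, here is a minimal userspace sketch of the folio_offset / len / num_pages calculation. It is an illustration only, not kernel code: the 4 KiB page size, the 64 KiB folio, the sample offsets, and the locally reimplemented DIV_ROUND_UP() are assumptions chosen for the example.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)               /* assume 4 KiB pages */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))   /* local stand-in */

int main(void)
{
        /*
         * Hypothetical setup: a 64 KiB folio, the first pinned page is the
         * third page of that folio (index 2), the I/O starts 0x300 bytes
         * into that page, and 100 KiB remain to be added to the bio.
         */
        size_t folio_size     = 64 * 1024;
        size_t folio_page_idx = 2;          /* page index inside the folio */
        size_t offset         = 0x300;      /* offset within the first page */
        size_t left           = 100 * 1024; /* bytes still to add */

        /* Same arithmetic as the patched __bio_iov_iter_get_pages() loop. */
        size_t folio_offset = (folio_page_idx << PAGE_SHIFT) + offset;
        size_t len = folio_size - folio_offset;   /* bytes left in this folio */
        if (len > left)
                len = left;
        unsigned int num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);

        printf("folio_offset = %zu\n", folio_offset); /* 8960 */
        printf("len          = %zu\n", len);          /* 56576 */
        printf("num_pages    = %u\n", num_pages);     /* 14: the loop advances i by 14 */
        return 0;
}

With these numbers a single bio_iov_add_folio() call covers 14 pages of the folio in one bio_vec, where the old per-page loop would have attempted 14 separate add-and-merge operations.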
--- a/block/bio.c
+++ b/block/bio.c
@@ -931,7 +931,8 @@ static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
         if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
                 return false;
 
-        *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+        *same_page = ((vec_end_addr & PAGE_MASK) == ((page_addr + off) &
+                     PAGE_MASK));
         if (!*same_page) {
                 if (IS_ENABLED(CONFIG_KMSAN))
                         return false;
@@ -1227,8 +1228,8 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
         bio_set_flag(bio, BIO_CLONED);
 }
 
-static int bio_iov_add_page(struct bio *bio, struct page *page,
-                unsigned int len, unsigned int offset)
+static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
+                             size_t offset)
 {
         bool same_page = false;
 
@@ -1237,30 +1238,61 @@ static int bio_iov_add_page(struct bio *bio, struct page *page,
 
         if (bio->bi_vcnt > 0 &&
             bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
-                                page, len, offset, &same_page)) {
+                                folio_page(folio, 0), len, offset,
+                                &same_page)) {
                 bio->bi_iter.bi_size += len;
                 if (same_page)
-                        bio_release_page(bio, page);
+                        bio_release_page(bio, folio_page(folio, 0));
                 return 0;
         }
-        __bio_add_page(bio, page, len, offset);
+        bio_add_folio_nofail(bio, folio, len, offset);
         return 0;
 }
 
-static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
-                unsigned int len, unsigned int offset)
+static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
+                                         size_t len, size_t offset)
 {
         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
         bool same_page = false;
 
-        if (bio_add_hw_page(q, bio, page, len, offset,
+        if (bio_add_hw_folio(q, bio, folio, len, offset,
                         queue_max_zone_append_sectors(q), &same_page) != len)
                 return -EINVAL;
         if (same_page)
-                bio_release_page(bio, page);
+                bio_release_page(bio, folio_page(folio, 0));
         return 0;
 }
 
+static unsigned int get_contig_folio_len(unsigned int *num_pages,
+                                         struct page **pages, unsigned int i,
+                                         struct folio *folio, size_t left,
+                                         size_t offset)
+{
+        size_t bytes = left;
+        size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
+        unsigned int j;
+
+        /*
+         * We might COW a single page in the middle of
+         * a large folio, so we have to check that all
+         * pages belong to the same folio.
+         */
+        bytes -= contig_sz;
+        for (j = i + 1; j < i + *num_pages; j++) {
+                size_t next = min_t(size_t, PAGE_SIZE, bytes);
+
+                if (page_folio(pages[j]) != folio ||
+                    pages[j] != pages[j - 1] + 1) {
+                        break;
+                }
+                contig_sz += next;
+                bytes -= next;
+        }
+        *num_pages = j - i;
+
+        return contig_sz;
+}
+
 #define PAGE_PTRS_PER_BVEC      (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1280,9 +1312,9 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
         unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
         struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
         struct page **pages = (struct page **)bv;
-        ssize_t size, left;
-        unsigned len, i = 0;
-        size_t offset;
+        ssize_t size;
+        unsigned int num_pages, i = 0;
+        size_t offset, folio_offset, left, len;
         int ret = 0;
 
         /*
@@ -1322,17 +1354,28 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
                 goto out;
         }
 
-        for (left = size, i = 0; left > 0; left -= len, i++) {
+        for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
                 struct page *page = pages[i];
+                struct folio *folio = page_folio(page);
+
+                folio_offset = ((size_t)folio_page_idx(folio, page) <<
+                               PAGE_SHIFT) + offset;
+
+                len = min(folio_size(folio) - folio_offset, left);
+
+                num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+                if (num_pages > 1)
+                        len = get_contig_folio_len(&num_pages, pages, i,
+                                                   folio, left, offset);
 
-                len = min_t(size_t, PAGE_SIZE - offset, left);
                 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-                        ret = bio_iov_add_zone_append_page(bio, page, len,
-                                        offset);
+                        ret = bio_iov_add_zone_append_folio(bio, folio, len,
+                                        folio_offset);
                         if (ret)
                                 break;
                 } else
-                        bio_iov_add_page(bio, page, len, offset);
+                        bio_iov_add_folio(bio, folio, len, folio_offset);
 
                 offset = 0;
         }
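
The added get_contig_folio_len() helper guards the multi-page fast path: a page in the middle of a large folio may have been replaced (for example by COW), so the run of pinned pages must be checked for membership in the same folio and for physical contiguity before a multi-page length is trusted. The userspace sketch below mirrors that scan with plain arrays; the pfn values, folio ids, and the contig_len() name are hypothetical stand-ins for the kernel's struct page pointers and page_folio() checks.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Stand-in for get_contig_folio_len(): pfns[] plays the role of the pinned
 * page pointers and folio_of[] maps each entry to a folio id.  Returns the
 * number of contiguous bytes starting at index i and shrinks *num_pages to
 * the number of entries actually covered.
 */
static size_t contig_len(unsigned int *num_pages, const unsigned long *pfns,
                         const int *folio_of, unsigned int i,
                         size_t left, size_t offset)
{
        size_t bytes = left;
        size_t contig_sz = MIN(PAGE_SIZE - offset, bytes);
        unsigned int j;

        bytes -= contig_sz;
        for (j = i + 1; j < i + *num_pages; j++) {
                size_t next = MIN(PAGE_SIZE, bytes);

                /* Stop at a page from another folio or a non-consecutive page. */
                if (folio_of[j] != folio_of[i] || pfns[j] != pfns[j - 1] + 1)
                        break;
                contig_sz += next;
                bytes -= next;
        }
        *num_pages = j - i;
        return contig_sz;
}

int main(void)
{
        /*
         * Pages 100..103 would belong to folio 7, but index 2 was replaced
         * by a COW copy (pfn 900, folio 9), breaking the contiguous run.
         */
        unsigned long pfns[] = { 100, 101, 900, 103 };
        int folio_of[]       = {   7,   7,   9,   7 };
        unsigned int num_pages = 4;

        size_t len = contig_len(&num_pages, pfns, folio_of, 0,
                                4 * PAGE_SIZE, 0);
        printf("%zu contiguous bytes over %u pages\n", len, num_pages);
        /* Prints: 8192 contiguous bytes over 2 pages */
        return 0;
}

Only the first two entries merge into one segment; the loop in __bio_iov_iter_get_pages() then restarts at the replaced page on its next iteration.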