Commit cf780a87 authored by Jens Axboe's avatar Jens Axboe

[PATCH] make mpage use bio_add_page()

This makes mpage use bio_add_page().  It has the added advantage that
users don't need to have deep knowledge about what the different bio
fields mean, nor does it have to abuse some of them while building the
bio.
parent f8b46092
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* *
* 15May2002 akpm@zip.com.au * 15May2002 akpm@zip.com.au
* Initial version * Initial version
* 27Jun2002 axboe@suse.de
* use bio_add_page() to build bio's just the right size
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -22,12 +24,6 @@ ...@@ -22,12 +24,6 @@
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/pagevec.h> #include <linux/pagevec.h>
/*
* The largest-sized BIO which this code will assemble, in bytes. Set this
* to PAGE_CACHE_SIZE if your drivers are broken.
*/
#define MPAGE_BIO_MAX_SIZE BIO_MAX_SIZE
/* /*
* I/O completion handler for multipage BIOs. * I/O completion handler for multipage BIOs.
* *
...@@ -82,8 +78,6 @@ static void mpage_end_io_write(struct bio *bio) ...@@ -82,8 +78,6 @@ static void mpage_end_io_write(struct bio *bio)
struct bio *mpage_bio_submit(int rw, struct bio *bio) struct bio *mpage_bio_submit(int rw, struct bio *bio)
{ {
bio->bi_vcnt = bio->bi_idx;
bio->bi_idx = 0;
bio->bi_end_io = mpage_end_io_read; bio->bi_end_io = mpage_end_io_read;
if (rw == WRITE) if (rw == WRITE)
bio->bi_end_io = mpage_end_io_write; bio->bi_end_io = mpage_end_io_write;
...@@ -106,11 +100,7 @@ mpage_alloc(struct block_device *bdev, ...@@ -106,11 +100,7 @@ mpage_alloc(struct block_device *bdev,
if (bio) { if (bio) {
bio->bi_bdev = bdev; bio->bi_bdev = bdev;
bio->bi_vcnt = nr_vecs;
bio->bi_idx = 0;
bio->bi_size = 0;
bio->bi_sector = first_sector; bio->bi_sector = first_sector;
bio->bi_io_vec[0].bv_page = NULL;
} }
return bio; return bio;
} }
...@@ -169,7 +159,6 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, ...@@ -169,7 +159,6 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
const unsigned blkbits = inode->i_blkbits; const unsigned blkbits = inode->i_blkbits;
const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits; const unsigned blocksize = 1 << blkbits;
struct bio_vec *bvec;
sector_t block_in_file; sector_t block_in_file;
sector_t last_block; sector_t last_block;
sector_t blocks[MAX_BUF_PER_PAGE]; sector_t blocks[MAX_BUF_PER_PAGE];
...@@ -223,26 +212,22 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, ...@@ -223,26 +212,22 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
/* /*
* This page will go to BIO. Do we need to send this BIO off first? * This page will go to BIO. Do we need to send this BIO off first?
*/ */
if (bio && (bio->bi_idx == bio->bi_vcnt || if (bio && (*last_block_in_bio != blocks[0] - 1))
*last_block_in_bio != blocks[0] - 1))
bio = mpage_bio_submit(READ, bio); bio = mpage_bio_submit(READ, bio);
alloc_new:
if (bio == NULL) { if (bio == NULL) {
unsigned nr_bvecs = MPAGE_BIO_MAX_SIZE / PAGE_CACHE_SIZE;
if (nr_bvecs > nr_pages)
nr_bvecs = nr_pages;
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
nr_bvecs, GFP_KERNEL); nr_pages, GFP_KERNEL);
if (bio == NULL) if (bio == NULL)
goto confused; goto confused;
} }
bvec = &bio->bi_io_vec[bio->bi_idx++]; if (bio_add_page(bio, page, first_hole << blkbits, 0)) {
bvec->bv_page = page; bio = mpage_bio_submit(READ, bio);
bvec->bv_len = (first_hole << blkbits); goto alloc_new;
bvec->bv_offset = 0; }
bio->bi_size += bvec->bv_len;
if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) if (buffer_boundary(&bh) || (first_hole != blocks_per_page))
bio = mpage_bio_submit(READ, bio); bio = mpage_bio_submit(READ, bio);
else else
...@@ -330,7 +315,6 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, ...@@ -330,7 +315,6 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
const unsigned blkbits = inode->i_blkbits; const unsigned blkbits = inode->i_blkbits;
unsigned long end_index; unsigned long end_index;
const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
struct bio_vec *bvec;
sector_t last_block; sector_t last_block;
sector_t block_in_file; sector_t block_in_file;
sector_t blocks[MAX_BUF_PER_PAGE]; sector_t blocks[MAX_BUF_PER_PAGE];
...@@ -432,15 +416,15 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, ...@@ -432,15 +416,15 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
/* /*
* This page will go to BIO. Do we need to send this BIO off first? * This page will go to BIO. Do we need to send this BIO off first?
*/ */
if (bio && (bio->bi_idx == bio->bi_vcnt || if (bio && *last_block_in_bio != blocks[0] - 1)
*last_block_in_bio != blocks[0] - 1))
bio = mpage_bio_submit(WRITE, bio); bio = mpage_bio_submit(WRITE, bio);
alloc_new:
if (bio == NULL) { if (bio == NULL) {
unsigned nr_bvecs = MPAGE_BIO_MAX_SIZE / PAGE_CACHE_SIZE; const unsigned __nr_pages = 64; /* FIXME */
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
nr_bvecs, GFP_NOFS|__GFP_HIGH); __nr_pages, GFP_NOFS|__GFP_HIGH);
if (bio == NULL) if (bio == NULL)
goto confused; goto confused;
} }
...@@ -465,11 +449,11 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, ...@@ -465,11 +449,11 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
try_to_free_buffers(page); try_to_free_buffers(page);
} }
bvec = &bio->bi_io_vec[bio->bi_idx++]; if (bio_add_page(bio, page, first_unmapped << blkbits, 0)) {
bvec->bv_page = page; bio = mpage_bio_submit(WRITE, bio);
bvec->bv_len = (first_unmapped << blkbits); goto alloc_new;
bvec->bv_offset = 0; }
bio->bi_size += bvec->bv_len;
BUG_ON(PageWriteback(page)); BUG_ON(PageWriteback(page));
SetPageWriteback(page); SetPageWriteback(page);
unlock_page(page); unlock_page(page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment