Commit 3babc504 authored by Jens Axboe

[PATCH] Update md to new i/o completions

parent 5f761bd3
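The conversion follows one pattern throughout the hunks below: a bi_end_io handler now receives the number of bytes completed plus an error code, returns 1 while bio->bi_size is still non-zero (a partial completion, more callbacks will follow), and returns 0 once the whole bio is done. A minimal sketch of such a handler, modeled on the bi_complete() change in md.c below (the handler name example_end_io and the includes are illustrative, not part of this patch):

#include <linux/bio.h>
#include <linux/completion.h>

/* Illustrative end_io handler in the new style (hypothetical name). */
static int example_end_io(struct bio *bio, unsigned int bytes_done, int error)
{
        /* Bytes still outstanding: tell the block layer to call us again. */
        if (bio->bi_size)
                return 1;

        /* Bio fully completed: wake up whoever is waiting on it. */
        complete((struct completion *)bio->bi_private);
        return 0;
}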
@@ -144,7 +144,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
         if (!hash->dev1) {
                 printk ("linear_make_request : hash->dev1==NULL for block %ld\n",
                         block);
-                bio_io_error(bio);
+                bio_io_error(bio, bio->bi_size);
                 return 0;
         }
         tmp_dev = hash->dev1;
@@ -154,7 +154,7 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
         if (block >= (tmp_dev->size + tmp_dev->offset)
                     || block < tmp_dev->offset) {
                 printk ("linear_make_request: Block %ld out of bounds on dev %s size %ld offset %ld\n", block, bdevname(tmp_dev->rdev->bdev), tmp_dev->size, tmp_dev->offset);
-                bio_io_error(bio);
+                bio_io_error(bio, bio->bi_size);
                 return 0;
         }
         bio->bi_bdev = tmp_dev->rdev->bdev;
@@ -144,7 +144,7 @@ static mddev_t *mddev_map[MAX_MD_DEVS];
 static int md_fail_request (request_queue_t *q, struct bio *bio)
 {
-        bio_io_error(bio);
+        bio_io_error(bio, bio->bi_size);
         return 0;
 }
@@ -361,9 +361,13 @@ static void free_disk_sb(mdk_rdev_t * rdev)
 }
-static void bi_complete(struct bio *bio)
+static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
 {
+        if (bio->bi_size)
+                return 1;
         complete((struct completion*)bio->bi_private);
+        return 0;
 }
 static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
@@ -109,17 +109,20 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int uptodate)
         struct bio *bio = mp_bh->master_bio;
         multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
-        bio_endio(bio, uptodate);
+        bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
         mempool_free(mp_bh, conf->pool);
 }
-void multipath_end_request(struct bio *bio)
+int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
         multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
         mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
+        if (bio->bi_size)
+                return 1;
         if (uptodate)
                 multipath_end_bh_io(mp_bh, uptodate);
         else {
@@ -132,7 +135,7 @@ void multipath_end_request(struct bio *bio)
                 multipath_reschedule_retry(mp_bh);
         }
         atomic_dec(&rdev->nr_pending);
-        return;
+        return 0;
 }
 /*
@@ -323,7 +323,7 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 bad_zone1:
         printk ("raid0_make_request bug: hash->zone1==NULL for block %ld\n", block);
 outerr:
-        bio_io_error(bio);
+        bio_io_error(bio, bio->bi_size);
         return 0;
 }
@@ -236,7 +236,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio, int uptodate)
 {
         struct bio *bio = r1_bio->master_bio;
-        bio_endio(bio, uptodate);
+        bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
         free_r1bio(r1_bio);
 }
@@ -251,12 +251,15 @@ static void inline update_head_pos(int disk, r1bio_t *r1_bio)
                 r1_bio->sector + (r1_bio->master_bio->bi_size >> 9);
 }
-static void end_request(struct bio *bio)
+static int end_request(struct bio *bio, unsigned int bytes_done, int error)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
         int mirror;
         conf_t *conf = mddev_to_conf(r1_bio->mddev);
+        if (bio->bi_size)
+                return 1;
         if (r1_bio->cmd == READ || r1_bio->cmd == READA)
                 mirror = r1_bio->read_disk;
@@ -313,6 +316,7 @@ static void end_request(struct bio *bio)
                 raid_end_bio_io(r1_bio, uptodate);
         }
         atomic_dec(&conf->mirrors[mirror].rdev->nr_pending);
+        return 0;
 }
 /*
@@ -748,12 +752,15 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 #define REDIRECT_SECTOR KERN_ERR \
 "raid1: %s: redirecting sector %lu to another mirror\n"
-static void end_sync_read(struct bio *bio)
+static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
         conf_t *conf = mddev_to_conf(r1_bio->mddev);
+        if (bio->bi_size)
+                return 1;
         if (r1_bio->read_bio != bio)
                 BUG();
         update_head_pos(r1_bio->read_disk, r1_bio);
@@ -769,9 +776,10 @@ static void end_sync_read(struct bio *bio)
                 set_bit(R1BIO_Uptodate, &r1_bio->state);
         atomic_dec(&conf->mirrors[r1_bio->read_disk].rdev->nr_pending);
         reschedule_retry(r1_bio);
+        return 0;
 }
-static void end_sync_write(struct bio *bio)
+static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
 {
         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -780,6 +788,9 @@ static void end_sync_write(struct bio *bio)
         int i;
         int mirror=0;
+        if (bio->bi_size)
+                return 1;
         for (i = 0; i < conf->raid_disks; i++)
                 if (r1_bio->write_bios[i] == bio) {
                         mirror = i;
@@ -795,6 +806,7 @@ static void end_sync_write(struct bio *bio)
                 put_buf(r1_bio);
         }
         atomic_dec(&conf->mirrors[mirror].rdev->nr_pending);
+        return 0;
 }
 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
@@ -321,13 +321,17 @@ static void shrink_stripes(raid5_conf_t *conf)
         conf->slab_cache = NULL;
 }
-static void raid5_end_read_request (struct bio * bi)
+static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done,
+                                   int error)
 {
         struct stripe_head *sh = bi->bi_private;
         raid5_conf_t *conf = sh->raid_conf;
         int disks = conf->raid_disks, i;
         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
+        if (bi->bi_size)
+                return 1;
         for (i=0 ; i<disks; i++)
                 if (bi == &sh->dev[i].req)
                         break;
@@ -335,7 +339,7 @@ static void raid5_end_read_request (struct bio * bi)
         PRINTK("end_read_request %lu/%d, count: %d, uptodate %d.\n", sh->sector, i, atomic_read(&sh->count), uptodate);
         if (i == disks) {
                 BUG();
-                return;
+                return 0;
         }
         if (uptodate) {
@@ -384,9 +388,11 @@ static void raid5_end_read_request (struct bio * bi)
         clear_bit(R5_LOCKED, &sh->dev[i].flags);
         set_bit(STRIPE_HANDLE, &sh->state);
         release_stripe(sh);
+        return 0;
 }
-static void raid5_end_write_request (struct bio *bi)
+static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
+                                    int error)
 {
         struct stripe_head *sh = bi->bi_private;
         raid5_conf_t *conf = sh->raid_conf;
@@ -394,6 +400,9 @@ static void raid5_end_write_request (struct bio *bi)
         unsigned long flags;
         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
+        if (bi->bi_size)
+                return 1;
         for (i=0 ; i<disks; i++)
                 if (bi == &sh->dev[i].req)
                         break;
@@ -401,7 +410,7 @@ static void raid5_end_write_request (struct bio *bi)
         PRINTK("end_write_request %lu/%d, count %d, uptodate: %d.\n", sh->sector, i, atomic_read(&sh->count), uptodate);
         if (i == disks) {
                 BUG();
-                return;
+                return 0;
         }
         spin_lock_irqsave(&conf->device_lock, flags);
@@ -414,6 +423,7 @@ static void raid5_end_write_request (struct bio *bi)
         set_bit(STRIPE_HANDLE, &sh->state);
         __release_stripe(conf, sh);
         spin_unlock_irqrestore(&conf->device_lock, flags);
+        return 0;
 }
@@ -1135,9 +1145,12 @@ static void handle_stripe(struct stripe_head *sh)
         spin_unlock(&sh->lock);
         while ((bi=return_bi)) {
+                int bytes = bi->bi_size;
                 return_bi = bi->bi_next;
                 bi->bi_next = NULL;
-                bi->bi_end_io(bi);
+                bi->bi_size = 0;
+                bi->bi_end_io(bi, bytes, 0);
         }
         for (i=disks; i-- ;)
                 if (sh->dev[i].flags & ((1<<R5_Wantwrite)|(1<<R5_Wantread))) {
@@ -1236,7 +1249,6 @@ static int make_request (request_queue_t *q, struct bio * bi)
         last_sector = bi->bi_sector + (bi->bi_size>>9);
         bi->bi_next = NULL;
-        set_bit(BIO_UPTODATE, &bi->bi_flags); /* will be cleared if error detected */
         bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
         for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
@@ -1257,8 +1269,12 @@ static int make_request (request_queue_t *q, struct bio * bi)
                 }
         }
         spin_lock_irq(&conf->device_lock);
-        if (--bi->bi_phys_segments == 0)
-                bi->bi_end_io(bi);
+        if (--bi->bi_phys_segments == 0) {
+                int bytes = bi->bi_size;
+                bi->bi_size = 0;
+                bi->bi_end_io(bi, bytes, 0);
+        }
         spin_unlock_irq(&conf->device_lock);
         return 0;
 }
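On the submitting side, the converted call sites finish a master bio by reporting the completed byte count through the new arguments: raid5 above zeroes bi_size and calls bi_end_io() directly (passing 0 as the error), while multipath and raid1 go through bio_endio() with uptodate ? 0 : -EIO. Roughly, with a hypothetical helper name and the error mapping taken from the bio_endio() call sites (a sketch, not code from the patch):

#include <linux/bio.h>
#include <linux/errno.h>

/* Hypothetical helper illustrating the call-site pattern above. */
static void finish_master_bio(struct bio *bi, int uptodate)
{
        int bytes = bi->bi_size;

        /* Zero bi_size first so the handler sees the bio as fully complete, */
        bi->bi_size = 0;
        /* then hand back the completed byte count and an error code. */
        bi->bi_end_io(bi, bytes, uptodate ? 0 : -EIO);
}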