Commit 5e36bb6e authored by Kiyoshi Ueda, committed by Jens Axboe

blk_end_request: changing ide normal caller (take 4)

This patch converts "normal" parts of ide to use blk_end_request
interfaces.  Related 'uptodate' arguments are converted to 'error'.

The conversion of 'uptodate' to 'error' is done only for the internal
function, __ide_end_request().
ide_end_request() was not changed since it's exported and used
by many ide drivers.

With this patch, blkdev_dequeue_request() in __ide_end_request() is
moved to blk_end_request, since blk_end_request takes care of
dequeueing request like below:

	if (!list_empty(&rq->queuelist))
		blkdev_dequeue_request(rq);

In the case of ide,
  o 'dequeue' variable of __ide_end_request() is 1 only when the request
    is still linked to the queue (i.e. rq->queuelist is not empty)
  o 'dequeue' variable of __ide_end_request() is 0 only when the request
    has already been removed from the queue (i.e. rq->queuelist is empty)
So blk_end_request can handle it correctly although ide always runs
through the code above.

Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent ea6f06f4
...@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate) ...@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
BUG(); BUG();
} else { } else {
spin_lock_irqsave(&ide_lock, flags); spin_lock_irqsave(&ide_lock, flags);
end_that_request_chunk(failed, 0, if (__blk_end_request(failed, -EIO,
failed->data_len); failed->data_len))
end_that_request_last(failed, 0); BUG();
spin_unlock_irqrestore(&ide_lock, flags); spin_unlock_irqrestore(&ide_lock, flags);
} }
} else } else
......
...@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq, ...@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
int uptodate, unsigned int nr_bytes, int dequeue) int uptodate, unsigned int nr_bytes, int dequeue)
{ {
int ret = 1; int ret = 1;
int error = 0;
if (uptodate <= 0)
error = uptodate ? uptodate : -EIO;
/* /*
* if failfast is set on a request, override number of sectors and * if failfast is set on a request, override number of sectors and
* complete the whole request right now * complete the whole request right now
*/ */
if (blk_noretry_request(rq) && end_io_error(uptodate)) if (blk_noretry_request(rq) && error)
nr_bytes = rq->hard_nr_sectors << 9; nr_bytes = rq->hard_nr_sectors << 9;
if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors) if (!blk_fs_request(rq) && error && !rq->errors)
rq->errors = -EIO; rq->errors = -EIO;
/* /*
...@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq, ...@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
ide_dma_on(drive); ide_dma_on(drive);
} }
if (!end_that_request_chunk(rq, uptodate, nr_bytes)) { if (!__blk_end_request(rq, error, nr_bytes)) {
add_disk_randomness(rq->rq_disk); if (dequeue)
if (dequeue) {
if (!list_empty(&rq->queuelist))
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL; HWGROUP(drive)->rq = NULL;
}
end_that_request_last(rq, uptodate);
ret = 0; ret = 0;
} }
...@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) ...@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
drive->blocked = 0; drive->blocked = 0;
blk_start_queue(drive->queue); blk_start_queue(drive->queue);
} }
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL; HWGROUP(drive)->rq = NULL;
end_that_request_last(rq, 1); if (__blk_end_request(rq, 0, 0))
BUG();
spin_unlock_irqrestore(&ide_lock, flags); spin_unlock_irqrestore(&ide_lock, flags);
} }
...@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) ...@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
} }
spin_lock_irqsave(&ide_lock, flags); spin_lock_irqsave(&ide_lock, flags);
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL; HWGROUP(drive)->rq = NULL;
rq->errors = err; rq->errors = err;
end_that_request_last(rq, !rq->errors); if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
BUG();
spin_unlock_irqrestore(&ide_lock, flags); spin_unlock_irqrestore(&ide_lock, flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment