ide: add ide_end_rq() (v2)

* Move request dequeuing from __ide_end_request() to ide_end_request().

* Rename __ide_end_request() to ide_end_rq() and export it.

* Fix ide_end_rq() to pass on the original blk_end_request() return value.

* ide_end_dequeued_request() is used only in cdrom_end_request()
  so inline it there and then remove the function.

v2:
* Remove the needless BUG_ON() while at it (the one in start_request() is enough).

There should be no functional changes caused by this patch.
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent 1713788f
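
For orientation, here is a minimal sketch of how the two completion helpers end up after this patch, reconstructed from the hunks below; the uptodate-to-error translation and the DMA retry logic, which this patch does not touch, are elided as comments:

int ide_end_rq(ide_drive_t *drive, struct request *rq, int uptodate,
	       unsigned int nr_bytes)
{
	int error = 0;

	/* ... map uptodate to an error code, re-enable DMA if appropriate ... */

	/* no dequeuing here any more; hand back blk_end_request()'s result */
	return blk_end_request(rq, error, nr_bytes);
}

int ide_end_request(ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = drive->hwif->rq;
	int rc;

	/* ... fall back to rq->data_len / hard_cur_sectors when nr_bytes is 0 ... */

	rc = ide_end_rq(drive, rq, uptodate, nr_bytes);
	if (rc == 0)		/* request fully completed: dequeue it here */
		drive->hwif->rq = NULL;

	return rc;
}

The only caller of ide_end_dequeued_request(), cdrom_end_request(), now calls ide_end_rq() directly and passes a byte count (hard_nr_sectors << 9) instead of a sector count.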
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -272,8 +272,8 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
 		 * now end the failed request
 		 */
 		if (blk_fs_request(failed)) {
-			if (ide_end_dequeued_request(drive, failed, 0,
-						failed->hard_nr_sectors))
+			if (ide_end_rq(drive, failed, 0,
+				       failed->hard_nr_sectors << 9))
 				BUG();
 		} else {
 			if (blk_end_request(failed, -EIO,
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -54,10 +54,9 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, unsigned int nr_bytes, int dequeue)
+int ide_end_rq(ide_drive_t *drive, struct request *rq, int uptodate,
+	       unsigned int nr_bytes)
 {
-	int ret = 1;
 	int error = 0;
 
 	if (uptodate <= 0)
@@ -83,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ide_dma_on(drive);
 	}
 
-	if (!blk_end_request(rq, error, nr_bytes))
-		ret = 0;
-
-	if (ret == 0 && dequeue)
-		drive->hwif->rq = NULL;
-
-	return ret;
+	return blk_end_request(rq, error, nr_bytes);
 }
+EXPORT_SYMBOL_GPL(ide_end_rq);
 
 /**
  *	ide_end_request	-	complete an IDE I/O
@@ -107,6 +101,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
 	struct request *rq = drive->hwif->rq;
+	int rc;
 
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
@@ -115,33 +110,13 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-}
-EXPORT_SYMBOL(ide_end_request);
-
-/**
- *	ide_end_dequeued_request	-	complete an IDE I/O
- *	@drive: IDE device for the I/O
- *	@uptodate:
- *	@nr_sectors: number of sectors completed
- *
- *	Complete an I/O that is no longer on the request queue. This
- *	typically occurs when we pull the request and issue a REQUEST_SENSE.
- *	We must still finish the old request but we must not tamper with the
- *	queue in the meantime.
- *
- *	NOTE: This path does not handle barrier, but barrier is not supported
- *	on ide-cd anyway.
- */
-int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, int nr_sectors)
-{
-	BUG_ON(!blk_rq_started(rq));
+	rc = ide_end_rq(drive, rq, uptodate, nr_bytes);
+	if (rc == 0)
+		drive->hwif->rq = NULL;
 
-	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
+	return rc;
 }
-EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
+EXPORT_SYMBOL(ide_end_request);
 
 void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 {
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1131,8 +1131,8 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
 
+int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
 int ide_end_request(ide_drive_t *, int, int);
-int ide_end_dequeued_request(ide_drive_t *, struct request *, int, int);
 void ide_kill_rq(ide_drive_t *, struct request *);
 
 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int,