Commit 278bdd1c authored by Jens Axboe

[PATCH] add end_request helpers that deal in bytes, not sectors

This adds an end_that_request_chunk() helper that puts the core
functionality in __end_that_request_first(), which deals in bytes.
end_that_request_first() does the 512 multiply, and
end_that_request_chunk() can just pass its byte count straight through.

This enables ide-cd to do proper dma in sizes that are not sector
aligned.  Some of the most important CD functions (ripping audio via
dma, burning audio cds, burning raw cds) _require_ this, or we will be
stuck in pio.  That stinks.  We simply cannot use pio for these, not
on writers that are at all fast!
parent 6f04a530
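
For context, here is a minimal sketch (an editor's illustration, not part of
the patch) of how a driver's completion path might call the new byte-granular
helper. The driver-side function name and its parameters are hypothetical;
only end_that_request_chunk() and end_that_request_last() come from this
patch, and end_that_request_last() must be called with the queue lock held.

/*
 * Hypothetical driver completion path (illustration only).  'bytes'
 * need not be a multiple of 512 -- e.g. one 2352-byte CD audio frame.
 */
static void my_transfer_done(struct request *rq, int error, int bytes)
{
	if (end_that_request_chunk(rq, !error, bytes))
		return;		/* buffers still pending on this request */

	/* request fully done; queue lock must be held here */
	end_that_request_last(rq);
}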
@@ -1958,6 +1958,9 @@ inline void blk_recalc_rq_segments(struct request *rq)
 	struct bio *bio;
 	int nr_phys_segs, nr_hw_segs;
 
+	if (!rq->bio)
+		return;
+
 	rq->buffer = bio_data(rq->bio);
 	nr_phys_segs = nr_hw_segs = 0;
@@ -1975,7 +1978,7 @@ inline void blk_recalc_rq_segments(struct request *rq)
 inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-	if (rq->bio) {
+	if (blk_fs_request(rq)) {
 		rq->hard_sector += nsect;
 		rq->nr_sectors = rq->hard_nr_sectors -= nsect;
 		rq->sector = rq->hard_sector;
@@ -1994,27 +1997,19 @@ inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }
 
-/**
- * end_that_request_first - end I/O on one buffer.
- * @req: the request being processed
- * @uptodate: 0 for I/O error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- *     Ends I/O on a number of sectors attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+static int __end_that_request_first(struct request *req, int uptodate,
+				    int nr_bytes)
 {
-	int total_nsect = 0, error = 0;
+	int total_bytes, bio_nbytes, error = 0, next_idx = 0;
 	struct bio *bio;
 
-	req->errors = 0;
+	/*
+	 * for a REQ_BLOCK_PC request, we want to carry any eventual
+	 * sense key with us all the way through
+	 */
+	if (!blk_pc_request(req))
+		req->errors = 0;
+
 	if (!uptodate) {
 		error = -EIO;
 		if (!(req->flags & REQ_QUIET))
@@ -2023,56 +2018,56 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 			(unsigned long long)req->sector);
 	}
 
+	total_bytes = bio_nbytes = 0;
 	while ((bio = req->bio)) {
-		int new_bio = 0, nsect;
+		int nbytes;
 
-		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-			printk("%s: bio idx %d >= vcnt %d\n", __FUNCTION__,
-				bio->bi_idx, bio->bi_vcnt);
-			break;
-		}
+		if (nr_bytes >= bio->bi_size) {
+			req->bio = bio->bi_next;
+			nbytes = bio->bi_size;
+			bio_endio(bio, nbytes, error);
+			next_idx = 0;
+			bio_nbytes = 0;
+		} else {
+			int idx = bio->bi_idx + next_idx;
 
-		BIO_BUG_ON(bio_iovec(bio)->bv_len > bio->bi_size);
+			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+				blk_dump_rq_flags(req, "__end_that");
+				printk("%s: bio idx %d >= vcnt %d\n",
+						__FUNCTION__,
+						bio->bi_idx, bio->bi_vcnt);
+				break;
+			}
 
-		/*
-		 * not a complete bvec done
-		 */
-		nsect = bio_iovec(bio)->bv_len >> 9;
-		if (unlikely(nsect > nr_sectors)) {
-			int partial = nr_sectors << 9;
+			nbytes = bio_iovec_idx(bio, idx)->bv_len;
+			BIO_BUG_ON(nbytes > bio->bi_size);
 
-			bio_iovec(bio)->bv_offset += partial;
-			bio_iovec(bio)->bv_len -= partial;
-			bio_endio(bio, partial, error);
-			total_nsect += nr_sectors;
-			break;
-		}
+			/*
+			 * not a complete bvec done
+			 */
+			if (unlikely(nbytes > nr_bytes)) {
+				bio_iovec(bio)->bv_offset += nr_bytes;
+				bio_iovec(bio)->bv_len -= nr_bytes;
+				bio_nbytes += nr_bytes;
+				total_bytes += nr_bytes;
+				break;
+			}
 
-		/*
-		 * we are ending the last part of the bio, advance req pointer
-		 */
-		if ((nsect << 9) >= bio->bi_size) {
-			req->bio = bio->bi_next;
-			new_bio = 1;
+			/*
+			 * advance to the next vector
+			 */
+			next_idx++;
+			bio_nbytes += nbytes;
 		}
 
-		bio_endio(bio, nsect << 9, error);
-
-		total_nsect += nsect;
-		nr_sectors -= nsect;
-
-		/*
-		 * if we didn't advance the req->bio pointer, advance bi_idx
-		 * to indicate we are now on the next bio_vec
-		 */
-		if (!new_bio)
-			bio->bi_idx++;
+		total_bytes += nbytes;
+		nr_bytes -= nbytes;
 
 		if ((bio = req->bio)) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
-			if (unlikely(nr_sectors <= 0))
+			if (unlikely(nr_bytes <= 0))
 				break;
 		}
 	}
@@ -2086,17 +2081,64 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 	/*
 	 * if the request wasn't completed, update state
 	 */
-	blk_recalc_rq_sectors(req, total_nsect);
+	if (bio_nbytes) {
+		bio_endio(bio, bio_nbytes, error);
+		req->bio->bi_idx += next_idx;
+	}
+
+	blk_recalc_rq_sectors(req, total_bytes >> 9);
 	blk_recalc_rq_segments(req);
 	return 1;
 }
 
+/**
+ * end_that_request_first - end I/O on a request
+ * @req: the request being processed
+ * @uptodate: 0 for I/O error
+ * @nr_sectors: number of sectors to end I/O on
+ *
+ * Description:
+ *     Ends I/O on a number of sectors attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+{
+	return __end_that_request_first(req, uptodate, nr_sectors << 9);
+}
+
+/**
+ * end_that_request_chunk - end I/O on a request
+ * @req: the request being processed
+ * @uptodate: 0 for I/O error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any). Like end_that_request_first(),
+ *     but deals with bytes instead of sectors.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
+{
+	return __end_that_request_first(req, uptodate, nr_bytes);
+}
+
+/*
+ * queue lock must be held
+ */
 void end_that_request_last(struct request *req)
 {
 	if (req->waiting)
 		complete(req->waiting);
 
-	__blk_put_request(req);
+	__blk_put_request(req->q, req);
 }
 
 int __init blk_dev_init(void)
@@ -2142,6 +2184,7 @@ int __init blk_dev_init(void)
 };
 
 EXPORT_SYMBOL(end_that_request_first);
+EXPORT_SYMBOL(end_that_request_chunk);
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(blk_init_queue);
 EXPORT_SYMBOL(bdev_get_queue);
...
@@ -39,6 +39,7 @@ void initrd_init(void);
  */
 extern int end_that_request_first(struct request *, int, int);
+extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *);
 
 struct request *elv_next_request(request_queue_t *q);
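
And a quick worked example of the new partial-bvec accounting in
__end_that_request_first() (editor's illustration; the numbers are
hypothetical, not from the patch):

/*
 * Worked example (illustration): completing one 2352-byte CD audio
 * frame against a bio whose current bio_vec still holds 4096 bytes.
 * nbytes (4096) > nr_bytes (2352), so the partial branch runs:
 *
 *	bv_offset += 2352;	// byte-exact position in the bvec
 *	bv_len    -= 2352;	// 1744 bytes left in this bvec
 *	bio_nbytes = total_bytes = 2352;
 *
 * After the loop, bio_endio(bio, 2352, 0) signals the partial progress
 * and blk_recalc_rq_sectors(req, 2352 >> 9) advances the request by
 * the 4 whole sectors covered; the sub-sector remainder stays tracked
 * byte-exactly in the bio_vec.  The return value is 1: still pending.
 */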