Commit bd24aecd authored by James Bottomley

merge by hand of axboe/jejb changes

parents ea3bedb6 c5868eb7
@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
while ((entry = next) != hash_list) {
next = entry->next;
prefetch(next);
drq = list_entry_hash(entry);
BUG_ON(!drq->hash_valid_count);
@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
while ((entry = entry->prev) != sort_list) {
__rq = list_entry_rq(entry);
prefetch(entry->prev);
BUG_ON(__rq->flags & REQ_STARTED);
if (!(__rq->flags & REQ_CMD))
@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
prefetch(nxt);
/*
* take it off the sort and fifo list, move
* to dispatch queue
@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
e->elevator_merge_req_fn(q, rq, next);
}
/*
* add_request and next_request are required to be supported, naturally
*/
void __elv_add_request(request_queue_t *q, struct request *rq,
struct list_head *insert_here)
void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
int plug)
{
struct list_head *insert = &q->queue_head;
if (at_end)
insert = insert->prev;
if (plug)
blk_plug_device(q);
q->elevator.elevator_add_req_fn(q, rq, insert);
}
void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
int plug)
{
q->elevator.elevator_add_req_fn(q, rq, insert_here);
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__elv_add_request(q, rq, at_end, plug);
spin_unlock_irqrestore(q->queue_lock, flags);
}
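For context, the new interface replaces the explicit insert_here pointer with at_end/plug flags: __elv_add_request() is the variant for callers already holding q->queue_lock, while elv_add_request() takes the lock itself. A minimal, hypothetical sketch of a driver-side requeue (my_requeue is not part of this patch), mirroring the blk_queue_invalidate_tags() hunk further down:

static void my_requeue(request_queue_t *q, struct request *rq)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        rq->flags &= ~REQ_STARTED;      /* re-added requests must not look started */
        __elv_add_request(q, rq, 0, 0); /* at_end = 0: head of queue, plug = 0 */
        spin_unlock_irqrestore(q->queue_lock, flags);
}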
static inline struct request *__elv_next_request(request_queue_t *q)
@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
struct request *elv_next_request(request_queue_t *q)
{
struct request *rq;
int ret;
while ((rq = __elv_next_request(q))) {
/*
* just mark as started even if we don't start it, a request
* that has been delayed should not be passed by new incoming
* requests
*/
rq->flags |= REQ_STARTED;
if (&rq->queuelist == q->last_merge)
@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break;
/*
* all ok, break and return it
*/
if (!q->prep_rq_fn(q, rq))
ret = q->prep_rq_fn(q, rq);
if (ret == BLKPREP_OK) {
break;
/*
* prep said no-go, kill it
*/
blkdev_dequeue_request(rq);
if (end_that_request_first(rq, 0, rq->nr_sectors))
BUG();
end_that_request_last(rq);
} else if (ret == BLKPREP_DEFER) {
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
blkdev_dequeue_request(rq);
rq->flags |= REQ_QUIET;
while (end_that_request_first(rq, 0, rq->nr_sectors))
;
end_that_request_last(rq);
} else {
printk("%s: bad return=%d\n", __FUNCTION__, ret);
break;
}
}
return rq;
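The prep_rq_fn hook now returns one of the BLKPREP_* codes added to blkdev.h later in this diff, and elv_next_request() acts on them as shown above. A hedged sketch of such a hook (my_prep_fn, my_resources_low and my_build_command are hypothetical names, not from this patch); a driver would register it with blk_queue_prep_rq(q, my_prep_fn):

static int my_prep_fn(request_queue_t *q, struct request *rq)
{
        if (my_resources_low(q))
                return BLKPREP_DEFER;   /* leave it on the queue, retry later */

        if (my_build_command(rq) < 0)
                return BLKPREP_KILL;    /* unrecoverable, elv_next_request() ends it */

        rq->flags |= REQ_DONTPREP;      /* already prepared, skip prep on a requeue */
        return BLKPREP_OK;              /* ready to hand to the driver */
}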
@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = &q->elevator;
/*
* the main clearing point for q->last_merge is on retrieval of
* request by driver (it calls elv_next_request()), but it _can_
* also happen here if a request is added to the queue but later
* deleted without ever being given to driver (merged with another
* request).
*/
if (&rq->queuelist == q->last_merge)
q->last_merge = NULL;
if (e->elevator_remove_req_fn)
e->elevator_remove_req_fn(q, rq);
}
@@ -357,6 +389,7 @@ module_init(elevator_global_init);
EXPORT_SYMBOL(elevator_noop);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_remove_request);
@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
q->backing_dev_info.state = 0;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
blk_queue_dma_alignment(q, 511);
/*
* by default assume old behaviour and bounce for any highmem page
@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
q->seg_boundary_mask = mask;
}
/**
* blk_queue_dma_alignment - set dma length and memory alignment
* @q: the request queue for the device
* @mask: alignment mask
*
* description:
* set required memory and length alignment for direct dma transactions.
* this is used when building direct io requests for the queue.
*
**/
void blk_queue_dma_alignment(request_queue_t *q, int mask)
{
q->dma_alignment = mask;
}
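A rough usage sketch (my_init_queue and my_addr_ok are hypothetical, not part of this patch): a driver whose hardware only needs 4-byte alignment relaxes the default mask at init time, and a direct-io path checks user addresses and lengths against whatever queue_dma_alignment() reports:

static void my_init_queue(request_queue_t *q)
{
        /* assumption: the device can DMA from any 4-byte aligned buffer */
        blk_queue_dma_alignment(q, 0x3);
}

static int my_addr_ok(request_queue_t *q, unsigned long uaddr, unsigned int len)
{
        int mask = queue_dma_alignment(q);      /* falls back to 511 if unset */

        return ((uaddr | len) & mask) == 0;
}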
void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
{
spin_lock_init(lock);
@@ -639,7 +655,7 @@ void blk_queue_invalidate_tags(request_queue_t *q)
blk_queue_end_tag(q, rq);
rq->flags &= ~REQ_STARTED;
elv_add_request(q, rq, 0);
__elv_add_request(q, rq, 0, 0);
}
}
@@ -655,14 +671,19 @@ static char *rq_flags[] = {
"REQ_PC",
"REQ_BLOCK_PC",
"REQ_SENSE",
"REQ_FAILED",
"REQ_QUIET",
"REQ_SPECIAL"
"REQ_DRIVE_CMD",
"REQ_DRIVE_TASK",
"REQ_DRIVE_TASKFILE",
};
void blk_dump_rq_flags(struct request *rq, char *msg)
{
int bit;
printk("%s: dev %02x:%02x: ", msg, major(rq->rq_dev), minor(rq->rq_dev));
printk("%s: dev %02x:%02x: flags = ", msg, major(rq->rq_dev), minor(rq->rq_dev));
bit = 0;
do {
if (rq->flags & (1 << bit))
@@ -670,10 +691,17 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
bit++;
} while (bit < __REQ_NR_BITS);
printk("sector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
rq->nr_sectors,
rq->current_nr_sectors);
printk("bio %p, biotail %p\n", rq->bio, rq->biotail);
printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
printk("cdb: ");
for (bit = 0; bit < sizeof(rq->cmd); bit++)
printk("%02x ", rq->cmd[bit]);
printk("\n");
}
}
void blk_recount_segments(request_queue_t *q, struct bio *bio)
@@ -1466,7 +1494,7 @@ static inline void add_request(request_queue_t * q, struct request * req,
* elevator indicated where it wants this request to be
* inserted at elevator_merge time
*/
__elv_add_request(q, req, insert_here);
__elv_add_request_pos(q, req, insert_here);
}
/*
@@ -1481,11 +1509,6 @@ void blk_put_request(struct request *req)
req->q = NULL;
req->rl = NULL;
if (q) {
if (q->last_merge == &req->queuelist)
q->last_merge = NULL;
}
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -2112,6 +2135,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_segment_boundary);
EXPORT_SYMBOL(blk_queue_dma_alignment);
EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
EXPORT_SYMBOL(blk_dump_rq_flags);
@@ -240,7 +240,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
SCpnt->request->special = (void *) SCpnt;
if(blk_rq_tagged(SCpnt->request))
blk_queue_end_tag(q, SCpnt->request);
_elv_add_request(q, SCpnt->request, 0, 0);
__elv_add_request(q, SCpnt->request, 0, 0);
}
/*
@@ -109,7 +109,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
break;
}
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
wake_up_interruptible_sync(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
}
pipe_wait(inode);
@@ -117,7 +117,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
up(PIPE_SEM(*inode));
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible_sync(PIPE_WAIT(*inode));
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
}
if (ret > 0)
@@ -101,6 +101,7 @@ struct bio {
#define BIO_EOF 2 /* out-of-bounds error */
#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
@@ -50,22 +50,6 @@ static inline void blkdev_dequeue_request(struct request *req)
elv_remove_request(req->q, req);
}
#define _elv_add_request_core(q, rq, where, plug) \
do { \
if ((plug)) \
blk_plug_device((q)); \
(q)->elevator.elevator_add_req_fn((q), (rq), (where)); \
} while (0)
#define _elv_add_request(q, rq, back, p) do { \
if ((back)) \
_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
else \
_elv_add_request_core((q), (rq), &(q)->queue_head, (p)); \
} while (0)
#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)
#if defined(MAJOR_NR) || defined(IDE_DRIVER)
#if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
#if !defined(IDE_DRIVER)
@@ -215,6 +215,7 @@ struct request_queue
unsigned int max_segment_size;
unsigned long seg_boundary_mask;
unsigned int dma_alignment;
wait_queue_head_t queue_wait;
@@ -254,6 +255,13 @@ struct request_queue
*/
#define blk_queue_headactive(q, head_active)
/*
* q->prep_rq_fn return values
*/
#define BLKPREP_OK 0 /* serve it */
#define BLKPREP_KILL 1 /* fatal error, kill */
#define BLKPREP_DEFER 2 /* leave on queue */
extern unsigned long blk_max_low_pfn, blk_max_pfn;
/*
@@ -268,7 +276,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
extern int init_emergency_isa_pool(void);
void blk_queue_bounce(request_queue_t *q, struct bio **bio);
inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#define rq_for_each_bio(bio, rq) \
if ((rq->bio)) \
@@ -339,6 +347,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
@@ -385,6 +394,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
return queue_hardsect_size(bdev_get_queue(bdev));
}
static inline int queue_dma_alignment(request_queue_t *q)
{
int retval = 511;
if (q && q->dma_alignment)
retval = q->dma_alignment;
return retval;
}
static inline int bdev_dma_aligment(struct block_device *bdev)
{
return queue_dma_alignment(bdev_get_queue(bdev));
}
#define blk_finished_io(nsects) do { } while (0)
#define blk_started_io(nsects) do { } while (0)
@@ -40,8 +40,8 @@ struct elevator_s
/*
* block elevator interface
*/
extern void __elv_add_request(request_queue_t *, struct request *,
struct list_head *);
extern void elv_add_request(request_queue_t *, struct request *, int, int);
extern void __elv_add_request(request_queue_t *, struct request *, int, int);
extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
@@ -50,6 +50,9 @@ extern void elv_remove_request(request_queue_t *, struct request *);
extern int elv_queue_empty(request_queue_t *);
extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);
#define __elv_add_request_pos(q, rq, pos) \
(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
/*
* noop I/O scheduler. always merges, always inserts new request at tail
*/
@@ -366,34 +366,13 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
return 0;
}
void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
mempool_t *pool)
{
struct page *page;
struct bio *bio = NULL;
int i, rw = bio_data_dir(*bio_orig), bio_gfp;
int i, rw = bio_data_dir(*bio_orig);
struct bio_vec *to, *from;
mempool_t *pool;
unsigned long pfn = q->bounce_pfn;
int gfp = q->bounce_gfp;
BUG_ON((*bio_orig)->bi_idx);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments
*/
if (!(gfp & GFP_DMA)) {
if (pfn >= blk_max_pfn)
return;
bio_gfp = GFP_NOHIGHIO;
pool = page_pool;
} else {
BUG_ON(!isa_page_pool);
bio_gfp = GFP_NOIO;
pool = isa_page_pool;
}
bio_for_each_segment(from, *bio_orig, i) {
page = from->bv_page;
@@ -401,7 +380,7 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
/*
* is destination page below bounce pfn?
*/
if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < q->bounce_pfn)
continue;
/*
@@ -412,11 +391,11 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
to = bio->bi_io_vec + i;
to->bv_page = mempool_alloc(pool, gfp);
to->bv_page = mempool_alloc(pool, q->bounce_gfp);
to->bv_len = from->bv_len;
to->bv_offset = from->bv_offset;
if (rw & WRITE) {
if (rw == WRITE) {
char *vto, *vfrom;
vto = page_address(to->bv_page) + to->bv_offset;
@@ -437,15 +416,16 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
* pages
*/
bio_for_each_segment(from, *bio_orig, i) {
to = &bio->bi_io_vec[i];
to = bio_iovec_idx(bio, i);
if (!to->bv_page) {
to->bv_page = from->bv_page;
to->bv_len = from->bv_len;
to->bv_offset = to->bv_offset;
to->bv_offset = from->bv_offset;
}
}
bio->bi_bdev = (*bio_orig)->bi_bdev;
bio->bi_flags |= (1 << BIO_BOUNCED);
bio->bi_sector = (*bio_orig)->bi_sector;
bio->bi_rw = (*bio_orig)->bi_rw;
@@ -454,14 +434,12 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
bio->bi_size = (*bio_orig)->bi_size;
if (pool == page_pool) {
if (rw & WRITE)
bio->bi_end_io = bounce_end_io_write;
else
bio->bi_end_io = bounce_end_io_write;
if (rw == READ)
bio->bi_end_io = bounce_end_io_read;
} else {
if (rw & WRITE)
bio->bi_end_io = bounce_end_io_write_isa;
else
bio->bi_end_io = bounce_end_io_write_isa;
if (rw == READ)
bio->bi_end_io = bounce_end_io_read_isa;
}
@@ -469,6 +447,37 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
*bio_orig = bio;
}
inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
{
mempool_t *pool;
int bio_gfp;
BUG_ON((*bio_orig)->bi_idx);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
* don't waste time iterating over bio segments
*/
if (!(q->bounce_gfp & GFP_DMA)) {
if (q->bounce_pfn >= blk_max_pfn)
return;
bio_gfp = GFP_NOHIGHIO;
pool = page_pool;
} else {
BUG_ON(!isa_page_pool);
bio_gfp = GFP_NOIO;
pool = isa_page_pool;
}
/*
* slow path
*/
__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
}
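A hedged usage sketch (my_make_request is hypothetical): a driver that installs its own make_request_fn still bounces highmem pages itself before touching bio data; the inline fast path above makes the call cheap when q->bounce_pfn already covers every page in the system:

static int my_make_request(request_queue_t *q, struct bio *bio)
{
        blk_queue_bounce(q, &bio);      /* may substitute a bounced copy of the bio */

        /* ... map and submit the (possibly bounced) bio ... */
        return 0;                       /* 0: the bio has been handled here */
}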
#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
void check_highmem_ptes(void)
{