Commit 707c584e authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.5
into home.osdl.org:/home/torvalds/v2.5/linux

parents c81eb7f2 b99c2656
@@ -255,7 +255,7 @@ static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
{
	as_del_arq_hash(arq);

-	if (q->last_merge == &arq->request->queuelist)
+	if (q->last_merge == arq->request)
		q->last_merge = NULL;
}
@@ -1347,50 +1347,39 @@ static void as_requeue_request(request_queue_t *q, struct request *rq)
}

static void
-as_insert_request(request_queue_t *q, struct request *rq,
-		  struct list_head *insert_here)
+as_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct as_data *ad = q->elevator.elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

-	if (unlikely(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) {
-		q->last_merge = NULL;
-
-		if (insert_here != ad->dispatch) {
-			while (ad->next_arq[REQ_SYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
-			while (ad->next_arq[REQ_ASYNC])
-				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
-		}
-
-		if (!insert_here)
-			insert_here = ad->dispatch->prev;
-	}
-
-	if (unlikely(!blk_fs_request(rq))) {
-		if (!insert_here)
-			insert_here = ad->dispatch;
-	}
-
-	if (insert_here) {
-		list_add(&rq->queuelist, insert_here);
-
-		/* Stop anticipating - let this request get through */
-		if (list_empty(ad->dispatch))
-			as_antic_stop(ad);
-
-		return;
+	switch (where) {
+		case ELEVATOR_INSERT_BACK:
+			while (ad->next_arq[REQ_SYNC])
+				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+			while (ad->next_arq[REQ_ASYNC])
+				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+			list_add_tail(&rq->queuelist, ad->dispatch);
+			break;
+		case ELEVATOR_INSERT_FRONT:
+			list_add(&rq->queuelist, ad->dispatch);
+			as_antic_stop(ad);
+			break;
+		case ELEVATOR_INSERT_SORT:
+			BUG_ON(!blk_fs_request(rq));
+			as_add_request(ad, arq);
+			break;
+		default:
+			printk("%s: bad insert point %d\n", __FUNCTION__,where);
+			return;
	}

	if (rq_mergeable(rq)) {
		as_add_arq_hash(ad, arq);

		if (!q->last_merge)
-			q->last_merge = &rq->queuelist;
+			q->last_merge = rq;
	}
-
-	as_add_request(ad, arq);
}
/*

@@ -1438,7 +1427,7 @@ as_latter_request(request_queue_t *q, struct request *rq)
}

static int
-as_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
+as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator.elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);

@@ -1450,7 +1439,7 @@ as_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = list_entry_rq(q->last_merge);
+		__rq = q->last_merge;
		goto out_insert;
	}

@@ -1482,11 +1471,11 @@ as_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
	return ELEVATOR_NO_MERGE;
out:
-	q->last_merge = &__rq->queuelist;
+	q->last_merge = __rq;
out_insert:
	if (ret)
		as_hot_arq_hash(ad, RQ_DATA(__rq));
-	*insert = &__rq->queuelist;
+	*req = __rq;
	return ret;
}

@@ -1514,7 +1503,7 @@ static void as_merged_request(request_queue_t *q, struct request *req)
	 */
	}

-	q->last_merge = &req->queuelist;
+	q->last_merge = req;
}

static void
...
@@ -33,13 +33,7 @@ static const int deadline_hash_shift = 5;
#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq)		(drq)->hash_valid_count
-#define DL_INVALIDATE_HASH(dd)				\
-	do {						\
-		if (!++(dd)->hash_valid_count)		\
-			(dd)->hash_valid_count = 1;	\
-	} while (0)
+#define ON_HASH(drq)		(drq)->on_hash

struct deadline_data {
	/*
@@ -58,7 +52,6 @@ struct deadline_data {
	struct deadline_rq *next_drq[2];
	struct list_head *dispatch;	/* driver dispatch queue */
	struct list_head *hash;		/* request hash */
-	unsigned long hash_valid_count;	/* barrier hash count */
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */
@@ -90,7 +83,7 @@ struct deadline_rq {
	 * request hash, key is the ending offset (for back merge lookup)
	 */
	struct list_head hash;
-	unsigned long hash_valid_count;
+	char on_hash;

	/*
	 * expire fifo
@@ -110,7 +103,7 @@ static kmem_cache_t *drq_pool;
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
-	drq->hash_valid_count = 0;
+	drq->on_hash = 0;
	list_del_init(&drq->hash);
}

@@ -125,7 +118,7 @@ deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
{
	deadline_del_drq_hash(drq);

-	if (q->last_merge == &drq->request->queuelist)
+	if (q->last_merge == drq->request)
		q->last_merge = NULL;
}

@@ -136,7 +129,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
	BUG_ON(ON_HASH(drq));

-	drq->hash_valid_count = dd->hash_valid_count;
+	drq->on_hash = 1;
	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

@@ -169,8 +162,7 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
		BUG_ON(!ON_HASH(drq));

-		if (!rq_mergeable(__rq)
-		    || drq->hash_valid_count != dd->hash_valid_count) {
+		if (!rq_mergeable(__rq)) {
			__deadline_del_drq_hash(drq);
			continue;
		}
@@ -324,7 +316,7 @@ static void deadline_remove_request(request_queue_t *q, struct request *rq)
}

static int
-deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
+deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator.elevator_data;
	struct request *__rq;
@@ -335,7 +327,7 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
-		__rq = list_entry_rq(q->last_merge);
+		__rq = q->last_merge;
		goto out_insert;
	}

@@ -371,11 +363,11 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
	return ELEVATOR_NO_MERGE;
out:
-	q->last_merge = &__rq->queuelist;
+	q->last_merge = __rq;
out_insert:
	if (ret)
		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
-	*insert = &__rq->queuelist;
+	*req = __rq;
	return ret;
}

@@ -398,7 +390,7 @@ static void deadline_merged_request(request_queue_t *q, struct request *req)
		deadline_add_drq_rb(dd, drq);
	}

-	q->last_merge = &req->queuelist;
+	q->last_merge = req;
}
static void

@@ -621,43 +613,35 @@ static struct request *deadline_next_request(request_queue_t *q)
}

static void
-deadline_insert_request(request_queue_t *q, struct request *rq,
-			struct list_head *insert_here)
+deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct deadline_data *dd = q->elevator.elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

-	if (unlikely(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) {
-		DL_INVALIDATE_HASH(dd);
-		q->last_merge = NULL;
-
-		if (insert_here != dd->dispatch) {
-			while (deadline_dispatch_requests(dd))
-				;
-		}
-
-		if (!insert_here)
-			insert_here = dd->dispatch->prev;
-	}
-
-	if (unlikely(!blk_fs_request(rq))) {
-		if (!insert_here)
-			insert_here = dd->dispatch;
-	}
-
-	if (insert_here) {
-		list_add(&rq->queuelist, insert_here);
-		return;
+	switch (where) {
+		case ELEVATOR_INSERT_BACK:
+			while (deadline_dispatch_requests(dd))
+				;
+			list_add_tail(&rq->queuelist, dd->dispatch);
+			break;
+		case ELEVATOR_INSERT_FRONT:
+			list_add(&rq->queuelist, dd->dispatch);
+			break;
+		case ELEVATOR_INSERT_SORT:
+			BUG_ON(!blk_fs_request(rq));
+			deadline_add_request(dd, drq);
+			break;
+		default:
+			printk("%s: bad insert point %d\n", __FUNCTION__,where);
+			return;
	}

	if (rq_mergeable(rq)) {
		deadline_add_drq_hash(dd, drq);

		if (!q->last_merge)
-			q->last_merge = &rq->queuelist;
+			q->last_merge = rq;
	}
-
-	deadline_add_request(dd, drq);
}
static int deadline_queue_empty(request_queue_t *q)

@@ -748,7 +732,6 @@ static int deadline_init(request_queue_t *q, elevator_t *e)
	dd->dispatch = &q->queue_head;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
-	dd->hash_valid_count = 1;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
@@ -779,7 +762,7 @@ deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
		drq->request = rq;

		INIT_LIST_HEAD(&drq->hash);
-		drq->hash_valid_count = 0;
+		drq->on_hash = 0;

		INIT_LIST_HEAD(&drq->fifo);
...
@@ -81,7 +81,7 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
{
	if (q->last_merge)
-		return elv_try_merge(list_entry_rq(q->last_merge), bio);
+		return elv_try_merge(q->last_merge, bio);

	return ELEVATOR_NO_MERGE;
}
@@ -117,12 +117,12 @@ int elevator_global_init(void)
	return 0;
}

-int elv_merge(request_queue_t *q, struct list_head **entry, struct bio *bio)
+int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	elevator_t *e = &q->elevator;

	if (e->elevator_merge_fn)
-		return e->elevator_merge_fn(q, entry, bio);
+		return e->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
@@ -140,7 +140,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
{
	elevator_t *e = &q->elevator;

-	if (q->last_merge == &next->queuelist)
+	if (q->last_merge == next)
		q->last_merge = NULL;

	if (e->elevator_merge_req_fn)
@@ -156,29 +156,25 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
	if (q->elevator.elevator_requeue_req_fn)
		q->elevator.elevator_requeue_req_fn(q, rq);
	else
-		__elv_add_request(q, rq, 0, 0);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}

-void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
		       int plug)
{
-	struct list_head *insert = NULL;
-
-	if (!at_end)
-		insert = &q->queue_head;
-
	if (plug)
		blk_plug_device(q);

-	q->elevator.elevator_add_req_fn(q, rq, insert);
+	q->elevator.elevator_add_req_fn(q, rq, where);
}

-void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+void elv_add_request(request_queue_t *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
-	__elv_add_request(q, rq, at_end, plug);
+	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

@@ -200,7 +196,7 @@ struct request *elv_next_request(request_queue_t *q)
	 */
	rq->flags |= REQ_STARTED;

-	if (&rq->queuelist == q->last_merge)
+	if (rq == q->last_merge)
		q->last_merge = NULL;

	if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
@@ -238,7 +234,7 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
	 * deleted without ever being given to driver (merged with another
	 * request).
	 */
-	if (&rq->queuelist == q->last_merge)
+	if (rq == q->last_merge)
		q->last_merge = NULL;

	if (e->elevator_remove_req_fn)
...
@@ -703,7 +703,7 @@ void blk_queue_invalidate_tags(request_queue_t *q)
		blk_queue_end_tag(q, rq);
		rq->flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, 0, 0);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	}
}
@@ -1632,11 +1632,16 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
	if(reinsert) {
		blk_requeue_request(q, rq);
	} else {
+		int where = ELEVATOR_INSERT_BACK;
+
+		if (at_head)
+			where = ELEVATOR_INSERT_FRONT;
+
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(q, rq);

		drive_stat_acct(rq, rq->nr_sectors, 1);
-		__elv_add_request(q, rq, !at_head, 0);
+		__elv_add_request(q, rq, where, 0);
	}
	q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1669,8 +1674,7 @@ void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
-static inline void add_request(request_queue_t * q, struct request * req,
-			       struct list_head *insert_here)
+static inline void add_request(request_queue_t * q, struct request * req)
{
	drive_stat_acct(req, req->nr_sectors, 1);
@@ -1681,7 +1685,7 @@ static inline void add_request(request_queue_t * q, struct request * req,
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
-	__elv_add_request_pos(q, req, insert_here);
+	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

/*
@@ -1880,7 +1884,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
{
	struct request *req, *freereq = NULL;
	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra;
-	struct list_head *insert_here;
	sector_t sector;

	sector = bio->bi_sector;
@@ -1903,7 +1906,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
	ra = bio->bi_rw & (1 << BIO_RW_AHEAD);

again:
-	insert_here = NULL;
	spin_lock_irq(q->queue_lock);

	if (elv_queue_empty(q)) {
@@ -1913,17 +1915,13 @@ static int __make_request(request_queue_t *q, struct bio *bio)
	if (barrier)
		goto get_rq;

-	el_ret = elv_merge(q, &insert_here, bio);
+	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
		case ELEVATOR_BACK_MERGE:
-			req = list_entry_rq(insert_here);
			BUG_ON(!rq_mergeable(req));
-			if (!q->back_merge_fn(q, req, bio)) {
-				insert_here = &req->queuelist;
+			if (!q->back_merge_fn(q, req, bio))
				break;
-			}

			req->biotail->bi_next = bio;
			req->biotail = bio;
@@ -1934,14 +1932,10 @@ static int __make_request(request_queue_t *q, struct bio *bio)
			goto out;

		case ELEVATOR_FRONT_MERGE:
-			req = list_entry_rq(insert_here);
			BUG_ON(!rq_mergeable(req));
-			if (!q->front_merge_fn(q, req, bio)) {
-				insert_here = req->queuelist.prev;
+			if (!q->front_merge_fn(q, req, bio))
				break;
-			}

			bio->bi_next = req->bio;
			req->cbio = req->bio = bio;
@@ -2029,7 +2023,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
	req->rq_disk = bio->bi_bdev->bd_disk;
	req->start_time = jiffies;

-	add_request(q, req, insert_here);
+	add_request(q, req);
out:
	if (freereq)
		__blk_put_request(q, freereq);
...
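Note (illustration only, not part of the commit): the hunks above change the elv_merge() contract so the elevator reports its merge candidate through a struct request ** instead of a struct list_head ** queue position, which is why __make_request() no longer needs list_entry_rq() or insert_here. A minimal sketch of how a caller consumes the new interface; the find_merge_candidate() helper name is hypothetical, everything else is taken from this diff:

	/*
	 * Sketch: ask the elevator for a merge candidate for @bio.
	 * On ELEVATOR_FRONT_MERGE/ELEVATOR_BACK_MERGE, *req points at the
	 * request to merge into; on ELEVATOR_NO_MERGE it is left untouched.
	 */
	static int find_merge_candidate(request_queue_t *q, struct bio *bio,
					struct request **req)
	{
		int ret = elv_merge(q, req, bio);

		if (ret != ELEVATOR_NO_MERGE)
			BUG_ON(!rq_mergeable(*req));	/* candidate must still be mergeable */

		return ret;
	}

The real __make_request() above additionally re-checks the merge with q->back_merge_fn()/q->front_merge_fn() before splicing the bio in.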
@@ -17,17 +17,15 @@
/*
 * See if we can find a request that this buffer can be coalesced with.
 */
-int elevator_noop_merge(request_queue_t *q, struct list_head **insert,
+int elevator_noop_merge(request_queue_t *q, struct request **req,
			struct bio *bio)
{
	struct list_head *entry = &q->queue_head;
	struct request *__rq;
	int ret;

-	if ((ret = elv_try_last_merge(q, bio))) {
-		*insert = q->last_merge;
+	if ((ret = elv_try_last_merge(q, bio)))
		return ret;
-	}

	while ((entry = entry->prev) != &q->queue_head) {
		__rq = list_entry_rq(entry);
@@ -41,8 +39,8 @@ int elevator_noop_merge(request_queue_t *q, struct list_head **insert,
			continue;

		if ((ret = elv_try_merge(__rq, bio))) {
-			*insert = &__rq->queuelist;
-			q->last_merge = &__rq->queuelist;
+			*req = __rq;
+			q->last_merge = __rq;
			return ret;
		}
	}
@@ -57,8 +55,13 @@ void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
}

void elevator_noop_add_request(request_queue_t *q, struct request *rq,
-			       struct list_head *insert_here)
+			       int where)
{
+	struct list_head *insert = q->queue_head.prev;
+
+	if (where == ELEVATOR_INSERT_FRONT)
+		insert = &q->queue_head;
+
	list_add_tail(&rq->queuelist, &q->queue_head);

	/*
@@ -67,7 +70,7 @@ void elevator_noop_add_request(request_queue_t *q, struct request *rq,
	if (rq->flags & REQ_HARDBARRIER)
		q->last_merge = NULL;
	else if (!q->last_merge)
-		q->last_merge = &rq->queuelist;
+		q->last_merge = rq;
}

struct request *elevator_noop_next_request(request_queue_t *q)
...
@@ -68,7 +68,7 @@ static int blk_do_rq(request_queue_t *q, struct block_device *bdev,
	rq->flags |= REQ_NOMERGE;
	rq->waiting = &wait;
-	elv_add_request(q, rq, 1, 1);
+	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
	generic_unplug_device(q);
	wait_for_completion(&wait);
...
@@ -1689,7 +1689,8 @@ static void idedisk_setup (ide_drive_t *drive)
	write_cache(drive, (id->cfs_enable_2 & 0x3000));

#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
-	HWIF(drive)->ide_dma_queued_on(drive);
+	if (drive->using_dma)
+		HWIF(drive)->ide_dma_queued_on(drive);
#endif
}
...
@@ -1387,7 +1387,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION(wait);
-	int insert_end = 1, err;
+	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

#ifdef CONFIG_BLK_DEV_PDC4030
@@ -1419,10 +1419,10 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt || action == ide_head_wait) {
		hwgroup->rq = NULL;
-		insert_end = 0;
+		where = ELEVATOR_INSERT_FRONT;
		rq->flags |= REQ_PREEMPT;
	}
-	__elv_add_request(drive->queue, rq, insert_end, 0);
+	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
...
@@ -102,8 +102,6 @@ struct termio {
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))

-#define MODULE_ALIAS_LDISC(ldisc) \
-	MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
-
#endif	/* __KERNEL__ */

#endif	/* _I386_TERMIOS_H */
@@ -169,9 +169,6 @@ struct winsize {
	0; \
})

-#define MODULE_ALIAS_LDISC(ldisc) \
-	MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
-
#endif	/* __KERNEL__ */

#endif /* _SPARC_TERMIOS_H */
@@ -168,9 +168,6 @@ struct winsize {
	0; \
})

-#define MODULE_ALIAS_LDISC(ldisc) \
-	MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
-
#endif	/* __KERNEL__ */

#endif /* _SPARC64_TERMIOS_H */
@@ -270,7 +270,7 @@ struct request_queue
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
-	struct list_head	*last_merge;
+	struct request		*last_merge;
	elevator_t		elevator;

	/*
...
#ifndef _LINUX_ELEVATOR_H
#define _LINUX_ELEVATOR_H

-typedef int (elevator_merge_fn) (request_queue_t *, struct list_head **,
+typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
				 struct bio *);

typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
@@ -10,7 +10,7 @@ typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
typedef struct request *(elevator_next_req_fn) (request_queue_t *);

-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, struct list_head *);
+typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int);
typedef int (elevator_queue_empty_fn) (request_queue_t *);
typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
@@ -62,7 +62,7 @@ struct elevator_s
 */
extern void elv_add_request(request_queue_t *, struct request *, int, int);
extern void __elv_add_request(request_queue_t *, struct request *, int, int);
-extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
+extern int elv_merge(request_queue_t *, struct request **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
			       struct request *);
extern void elv_merged_request(request_queue_t *, struct request *);
@@ -79,9 +79,6 @@ extern void elv_completed_request(request_queue_t *, struct request *);
extern int elv_set_request(request_queue_t *, struct request *, int);
extern void elv_put_request(request_queue_t *, struct request *);

-#define __elv_add_request_pos(q, rq, pos) \
-	(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
-
/*
 * noop I/O scheduler. always merges, always inserts new request at tail
 */
@@ -111,4 +108,11 @@ extern inline int elv_try_last_merge(request_queue_t *, struct bio *);
#define ELEVATOR_FRONT_MERGE	1
#define ELEVATOR_BACK_MERGE	2

+/*
+ * Insertion selection
+ */
+#define ELEVATOR_INSERT_FRONT	1
+#define ELEVATOR_INSERT_BACK	2
+#define ELEVATOR_INSERT_SORT	3
+
#endif
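Note (illustration only, not part of the commit): with the ELEVATOR_INSERT_* selectors above, callers of __elv_add_request() name an insertion point instead of computing a struct list_head position. A minimal sketch, modelled on blk_insert_request() earlier in this diff; the insert_special_sketch() helper name is hypothetical:

	/*
	 * Sketch: queue a special (non-merged) request either at the front
	 * or the back of the dispatch list, depending on @at_head.
	 * Ordinary file system requests instead go through add_request(),
	 * which uses ELEVATOR_INSERT_SORT so the io scheduler orders them.
	 */
	static void insert_special_sketch(request_queue_t *q, struct request *rq,
					  int at_head)
	{
		int where = ELEVATOR_INSERT_BACK;

		if (at_head)
			where = ELEVATOR_INSERT_FRONT;

		__elv_add_request(q, rq, where, 0);	/* 0: do not plug the queue */
	}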
@@ -138,4 +138,7 @@ struct tty_ldisc {

#define LDISC_FLAG_DEFINED	0x00000001

+#define MODULE_ALIAS_LDISC(ldisc) \
+	MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
+
#endif /* _LINUX_TTY_LDISC_H */