Commit 53a08807 authored by Tejun Heo's avatar Tejun Heo Committed by Jens Axboe

block: internal dequeue shouldn't start timer

blkdev_dequeue_request() and elv_dequeue_request() are equivalent and
both start the timeout timer.  The barrier code dequeues the original
barrier request but doesn't pass the request itself to the lower level
driver; it only passes the broken-down proxy requests.  However, because
the original barrier request goes through the same dequeue path, the
timeout timer is started on it.  If the barrier sequence takes long
enough, this timer expires, but the low level driver has no idea about
this request and an oops follows.

Timeout timer shouldn't have been started on the original barrier
request as it never goes through actual IO.  This patch unexports
elv_dequeue_request(), which has no external user anyway, and makes it
operate on elevator proper w/o adding the timer and make
blkdev_dequeue_request() call elv_dequeue_request() and add timer.
Internal users which don't pass the request to driver - barrier code
and end_that_request_last() - are converted to use
elv_dequeue_request().
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent bf91db18
...@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q, ...@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
/* /*
* Prep proxy barrier request. * Prep proxy barrier request.
*/ */
blkdev_dequeue_request(rq); elv_dequeue_request(q, rq);
q->orig_bar_rq = rq; q->orig_bar_rq = rq;
rq = &q->bar_rq; rq = &q->bar_rq;
blk_rq_init(q, rq); blk_rq_init(q, rq);
...@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) ...@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
* This can happen when the queue switches to * This can happen when the queue switches to
* ORDERED_NONE while this request is on it. * ORDERED_NONE while this request is on it.
*/ */
blkdev_dequeue_request(rq); elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP, if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq))) blk_rq_bytes(rq)))
BUG(); BUG();
......
...@@ -1636,6 +1636,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) ...@@ -1636,6 +1636,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
} }
EXPORT_SYMBOL_GPL(blk_insert_cloned_request); EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
/**
 * blkdev_dequeue_request - dequeue request and start timeout timer
 * @req: request to dequeue
 *
 * Removes @req from the elevator and arms the per-request timeout
 * timer, marking the point at which ownership of the request passes
 * to the low level driver.
 *
 * Block-layer internal callers that never hand the request to a
 * driver (e.g. the barrier code) must use elv_dequeue_request()
 * directly so that no timeout timer is started.
 */
void blkdev_dequeue_request(struct request *req)
{
	struct request_queue *q = req->q;

	/* Take the request off the elevator first... */
	elv_dequeue_request(q, req);

	/* ...then arm the timeout, since the hardware owns it from here. */
	blk_add_timer(req);
}
EXPORT_SYMBOL(blkdev_dequeue_request);
/** /**
* __end_that_request_first - end I/O on a request * __end_that_request_first - end I/O on a request
* @req: the request being processed * @req: the request being processed
...@@ -1774,7 +1796,7 @@ static void end_that_request_last(struct request *req, int error) ...@@ -1774,7 +1796,7 @@ static void end_that_request_last(struct request *req, int error)
blk_queue_end_tag(req->q, req); blk_queue_end_tag(req->q, req);
if (blk_queued_rq(req)) if (blk_queued_rq(req))
blkdev_dequeue_request(req); elv_dequeue_request(req->q, req);
if (unlikely(laptop_mode) && blk_fs_request(req)) if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion(); laptop_io_completion();
......
...@@ -844,14 +844,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) ...@@ -844,14 +844,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
*/ */
if (blk_account_rq(rq)) if (blk_account_rq(rq))
q->in_flight++; q->in_flight++;
/*
* We are now handing the request to the hardware, add the
* timeout handler.
*/
blk_add_timer(rq);
} }
EXPORT_SYMBOL(elv_dequeue_request);
int elv_queue_empty(struct request_queue *q) int elv_queue_empty(struct request_queue *q)
{ {
......
...@@ -786,6 +786,8 @@ static inline void blk_run_address_space(struct address_space *mapping) ...@@ -786,6 +786,8 @@ static inline void blk_run_address_space(struct address_space *mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL); blk_run_backing_dev(mapping->backing_dev_info, NULL);
} }
extern void blkdev_dequeue_request(struct request *req);
/* /*
* blk_end_request() and friends. * blk_end_request() and friends.
* __blk_end_request() and end_request() must be called with * __blk_end_request() and end_request() must be called with
...@@ -820,11 +822,6 @@ extern void blk_update_request(struct request *rq, int error, ...@@ -820,11 +822,6 @@ extern void blk_update_request(struct request *rq, int error,
extern unsigned int blk_rq_bytes(struct request *rq); extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq); extern unsigned int blk_rq_cur_bytes(struct request *rq);
static inline void blkdev_dequeue_request(struct request *req)
{
elv_dequeue_request(req->q, req);
}
/* /*
* Access functions for manipulating queue properties * Access functions for manipulating queue properties
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment