Commit be7f99c5 authored by Christoph Hellwig, committed by Jens Axboe

block: remove blk_queue_invalidate_tags

This function is entirely unused, so remove it and the tag_busy_list
member of struct request_queue.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 95c7c09f
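
For context, blk_queue_invalidate_tags() belonged to the legacy (pre-blk-mq) tagging API. Below is a minimal, hedged sketch of how a driver error path might have called it before this removal; the driver function name is hypothetical, while the helper's signature and the queue_lock requirement come from the code removed in this commit.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/*
 * Hypothetical legacy-driver error handler (illustrative only).
 * blk_queue_invalidate_tags() had to be called with q->queue_lock held
 * (note the lockdep_assert_held() in the removed body); it walked
 * q->tag_busy_list and requeued every outstanding tagged request.
 */
static void example_reset_and_requeue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);	/* requeue all pending tagged requests */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
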
@@ -752,18 +752,6 @@ completion of the request to the block layer. This means ending tag
operations before calling end_that_request_last()! For an example of a user
of these helpers, see the IDE tagged command queueing support.
-Certain hardware conditions may dictate a need to invalidate the block tag
-queue. For instance, on IDE any tagged request error needs to clear both
-the hardware and software block queue and enable the driver to sanely restart
-all the outstanding requests. There's a third helper to do that:
-
-	blk_queue_invalidate_tags(struct request_queue *q)
-
-	Clear the internal block tag queue and re-add all the pending requests
-	to the request queue. The driver will receive them again on the
-	next request_fn run, just like it did the first time it encountered
-	them.
3.2.5.2 Tag info
Some block functions exist to query current tag status or to go from a
@@ -805,8 +793,7 @@ Internally, block manages tags in the blk_queue_tag structure:
Most of the above is simple and straight forward, however busy_list may need
a bit of explaining. Normally we don't care too much about request ordering,
but in the event of any barrier requests in the tag queue we need to ensure
-that requests are restarted in the order they were queue. This may happen
-if the driver needs to use blk_queue_invalidate_tags().
+that requests are restarted in the order they were queue.
3.3 I/O Submission
...
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
-	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q:  the request queue for the device
- *
- * Description:
- *   Hardware conditions may dictate a need to stop all pending requests.
- *   In this case, we will safely clear the block side of the tag queue and
- *   readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
-	struct list_head *tmp, *n;
-
-	lockdep_assert_held(q->queue_lock);
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list)
-		blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
@@ -562,7 +562,6 @@ struct request_queue {
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
-	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
@@ -1375,7 +1374,6 @@ extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
-extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);
...
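
After this commit the legacy tag lifecycle still goes through blk_queue_start_tag() and blk_queue_end_tag(); only the bulk-invalidate path disappears. The following is a hedged sketch of that remaining flow, assuming a conventional legacy request_fn driver; all names other than the block-layer helpers are illustrative.

#include <linux/blkdev.h>

/* Called with q->queue_lock held, as the legacy request_fn contract requires. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		/*
		 * blk_queue_start_tag() returns non-zero when no tag is
		 * free; the request then stays queued for a later rerun.
		 * On success it assigns rq->tag and starts the request.
		 */
		if (blk_queue_start_tag(q, rq))
			break;

		/* issue rq to hardware here (driver specific) */
	}
}

/* Completion path: release the tag before ending the request (queue_lock held). */
static void example_complete(struct request_queue *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);
	__blk_end_request_all(rq, BLK_STS_OK);
}
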