Commit 797e7dbb authored by Tejun Heo, committed by Jens Axboe

[BLOCK] reimplement handling of barrier request

Reimplement handling of barrier requests.

* Flexible handling to deal with various capabilities of
  target devices.
* Retry support for falling back to a simpler ordered mode.
* Tagged queues which don't support ordered tag can still handle
  ordered (barrier) requests.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 52d9e675
elevator.c

@@ -304,15 +304,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 	rq->flags &= ~REQ_STARTED;
 
-	/*
-	 * if this is the flush, requeue the original instead and drop the flush
-	 */
-	if (rq->flags & REQ_BAR_FLUSH) {
-		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-		rq = rq->end_io_data;
-	}
-
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -332,7 +324,18 @@ static void elv_drain_elevator(request_queue_t *q)
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
+	struct list_head *pos;
+	unsigned ordseq;
+
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
 	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		q->ordcolor ^= 1;
+
 		/*
 		 * barriers implicitly indicate back insertion
 		 */
@@ -393,6 +396,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		q->elevator->ops->elevator_add_req_fn(q, rq);
 		break;
 
+	case ELEVATOR_INSERT_REQUEUE:
+		/*
+		 * If ordered flush isn't in progress, we do front
+		 * insertion; otherwise, requests should be requeued
+		 * in ordseq order.
+		 */
+		rq->flags |= REQ_SOFTBARRIER;
+
+		if (q->ordseq == 0) {
+			list_add(&rq->queuelist, &q->queue_head);
+			break;
+		}
+
+		ordseq = blk_ordered_req_seq(rq);
+
+		list_for_each(pos, &q->queue_head) {
+			struct request *pos_rq = list_entry_rq(pos);
+			if (ordseq <= blk_ordered_req_seq(pos_rq))
+				break;
+		}
+
+		list_add_tail(&rq->queuelist, pos);
+		break;
+
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __FUNCTION__, where);
@@ -422,25 +449,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 {
 	struct request *rq;
 
-	if (unlikely(list_empty(&q->queue_head) &&
-		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
-		return NULL;
-
-	rq = list_entry_rq(q->queue_head.next);
-
-	/*
-	 * if this is a barrier write and the device has to issue a
-	 * flush sequence to support it, check how far we are
-	 */
-	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
-		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
-		if (q->ordered == QUEUE_ORDERED_FLUSH &&
-		    !blk_barrier_preflush(rq))
-			rq = blk_start_pre_flush(q, rq);
-	}
-
-	return rq;
+	while (1) {
+		while (!list_empty(&q->queue_head)) {
+			rq = list_entry_rq(q->queue_head.next);
+			if (blk_do_ordered(q, &rq))
+				return rq;
+		}
+
+		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+			return NULL;
+	}
 }
 
 struct request *elv_next_request(request_queue_t *q)
@@ -593,7 +611,21 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	 * request is released from the driver, io must be done
 	 */
 	if (blk_account_rq(rq)) {
+		struct request *first_rq = list_entry_rq(q->queue_head.next);
+
 		q->in_flight--;
+
+		/*
+		 * Check if the queue is waiting for fs requests to be
+		 * drained for flush sequence.
+		 */
+		if (q->ordseq && q->in_flight == 0 &&
+		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
+		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
+			q->request_fn(q);
+		}
+
 		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
ll_rw_blk.c

[diff collapsed in this view]
include/linux/blkdev.h

@@ -207,6 +207,7 @@ enum rq_flag_bits {
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
 	__REQ_CMD,		/* is a regular fs rw request */
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_STARTED,		/* drive already may have started this one */

@@ -230,9 +231,7 @@ enum rq_flag_bits {
 	__REQ_PM_SUSPEND,	/* suspend request */
 	__REQ_PM_RESUME,	/* resume request */
 	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_BAR_PREFLUSH,	/* barrier pre-flush done */
-	__REQ_BAR_POSTFLUSH,	/* barrier post-flush */
-	__REQ_BAR_FLUSH,	/* rq is the flush request */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
 	__REQ_NR_BITS,		/* stops here */
 };
@@ -241,6 +240,7 @@ enum rq_flag_bits {
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
 #define REQ_CMD		(1 << __REQ_CMD)
 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
 #define REQ_STARTED	(1 << __REQ_STARTED)

@@ -260,9 +260,7 @@ enum rq_flag_bits {
 #define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
 #define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
 #define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH	(1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH	(1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH	(1 << __REQ_BAR_FLUSH)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
 /*
  * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME

@@ -292,8 +290,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 enum blk_queue_state {
 	Queue_down,

@@ -335,7 +332,6 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
-	end_flush_fn		*end_flush_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -420,14 +416,11 @@ struct request_queue
 	/*
 	 * reserved for flush operations
 	 */
-	struct request		*flush_rq;
-	unsigned char		ordered;
-};
-
-enum {
-	QUEUE_ORDERED_NONE,
-	QUEUE_ORDERED_TAG,
-	QUEUE_ORDERED_FLUSH,
+	unsigned int		ordered, next_ordered, ordseq;
+	int			orderr, ordcolor;
+	struct request		pre_flush_rq, bar_rq, post_flush_rq;
+	struct request		*orig_bar_rq;
+	unsigned int		bi_size;
 };
 
 #define RQ_INACTIVE		(-1)
@@ -445,12 +438,51 @@ enum {
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_FLUSH	9	/* doing barrier flush sequence */
+
+enum {
+	/*
+	 * Hardbarrier is supported with one of the following methods.
+	 *
+	 * NONE		: hardbarrier unsupported
+	 * DRAIN	: ordering by draining is enough
+	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
+	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
+	 * TAG		: ordering by tag is enough
+	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
+	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
+	 */
+	QUEUE_ORDERED_NONE	= 0x00,
+	QUEUE_ORDERED_DRAIN	= 0x01,
+	QUEUE_ORDERED_TAG	= 0x02,
+
+	QUEUE_ORDERED_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_POSTFLUSH	= 0x20,
+	QUEUE_ORDERED_FUA	= 0x40,
+
+	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
+			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+	/*
+	 * Ordered operation sequence
+	 */
+	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
+	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
+	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
+	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
+	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
+	QUEUE_ORDSEQ_DONE	= 0x20,
+};
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q)	test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q)	((q)->ordseq)
 
 #define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
 #define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
@@ -466,8 +498,7 @@ enum {
 #define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
@@ -665,11 +696,12 @@ extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern struct request *blk_start_pre_flush(request_queue_t *, struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
include/linux/elevator.h

@@ -130,6 +130,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
 #define ELEVATOR_INSERT_FRONT	1
 #define ELEVATOR_INSERT_BACK	2
 #define ELEVATOR_INSERT_SORT	3
+#define ELEVATOR_INSERT_REQUEUE	4
 
 /*
  * return values from elevator_may_queue_fn