Commit 719eb3e1 authored by Jens Axboe

[PATCH] soft and hard barriers

Right now we have a single barrier type in the block layer, used mainly
to keep the io scheduler from reordering requests when we don't want it
to.  We also need a flag that tells the io scheduler and the low-level
queue that a request is a barrier.  So there are really two needs:

o software barrier: prevents the io scheduler from reordering
o hardware barrier: the driver must also prevent the drive from reordering

So this patch gets rid of REQ_BARRIER and instead adds REQ_SOFTBARRIER
and REQ_HARDBARRIER.
parent 26d15b50
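
As an illustration of the split (a sketch only, not code from the patch;
both helper names here are hypothetical), an io scheduler and a driver
would consult the two bits like this:

/*
 * Sketch only: assumes the flag definitions from the blkdev.h hunks
 * below; elevator_may_reorder() and drive_needs_ordered_tag() are
 * hypothetical helpers, not part of this patch.
 */
static inline int elevator_may_reorder(struct request *rq)
{
	/* either barrier bit pins the request in queue order */
	return !(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER));
}

static inline int drive_needs_ordered_tag(struct request *rq)
{
	/* only a hard barrier must also be ordered at the drive itself */
	return (rq->flags & REQ_HARDBARRIER) != 0;
}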
@@ -207,7 +207,7 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
 			break;
 		}
-		if (__rq->flags & REQ_BARRIER)
+		if (__rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER))
 			break;
 		/*
@@ -413,7 +413,7 @@ deadline_add_request(request_queue_t *q, struct request *rq, struct list_head *i
 	 * flush hash on barrier insert, as not to allow merges before a
	 * barrier.
	 */
-	if (unlikely(rq->flags & REQ_BARRIER)) {
+	if (unlikely(rq->flags & REQ_HARDBARRIER)) {
		DL_INVALIDATE_HASH(dd);
		q->last_merge = NULL;
	}
@@ -176,7 +176,9 @@ int elevator_noop_merge(request_queue_t *q, struct list_head **insert,
 	while ((entry = entry->prev) != &q->queue_head) {
		__rq = list_entry_rq(entry);
-		if (__rq->flags & (REQ_BARRIER | REQ_STARTED))
+		if (__rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER))
+			break;
+		else if (__rq->flags & REQ_STARTED)
			break;
		if (!(__rq->flags & REQ_CMD))
@@ -200,7 +202,7 @@ void elevator_noop_add_request(request_queue_t *q, struct request *rq,
 	/*
	 * new merges must not precede this barrier
	 */
-	if (rq->flags & REQ_BARRIER)
+	if (rq->flags & REQ_HARDBARRIER)
		q->last_merge = NULL;
	else if (!q->last_merge)
		q->last_merge = &rq->queuelist;
@@ -644,7 +644,8 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 static char *rq_flags[] = {
	"REQ_RW",
	"REQ_RW_AHEAD",
-	"REQ_BARRIER",
+	"REQ_SOFTBARRIER",
+	"REQ_HARDBARRIER",
	"REQ_CMD",
	"REQ_NOMERGE",
	"REQ_STARTED",
@@ -1392,7 +1393,7 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
-	rq->flags |= REQ_SPECIAL | REQ_BARRIER;
+	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
	rq->special = data;
@@ -1788,7 +1789,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	 * REQ_BARRIER implies no merging, but lets make it explicit
	 */
	if (barrier)
-		req->flags |= (REQ_BARRIER | REQ_NOMERGE);
+		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
	req->errors = 0;
	req->hard_sector = req->sector = sector;
@@ -2867,7 +2867,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
 			aic_dev->r_total++;
			ptr = aic_dev->r_bins;
		}
-		if(cmd->device->simple_tags && cmd->request->flags & REQ_BARRIER)
+		if(cmd->device->simple_tags && cmd->request->flags & REQ_HARDBARRIER)
		{
			aic_dev->barrier_total++;
			if(scb->tag_action == MSG_ORDERED_Q_TAG)
@@ -10206,7 +10206,7 @@ aic7xxx_buildscb(struct aic7xxx_host *p, Scsi_Cmnd *cmd,
 	/* We always force TEST_UNIT_READY to untagged */
	if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags)
	{
-		if (req->flags & REQ_BARRIER)
+		if (req->flags & REQ_HARDBARRIER)
		{
			if(sdptr->ordered_tags)
			{
@@ -915,8 +915,8 @@ static inline int scsi_populate_tag_msg(Scsi_Cmnd *SCpnt, char *msg) {
 	if(!blk_rq_tagged(req))
		return 0;
-	if(req->flags & REQ_BARRIER)
+	if (req->flags & REQ_HARDBARRIER)
		*msg++ = MSG_ORDERED_TAG;
	else
		*msg++ = MSG_SIMPLE_TAG;
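
For SCSI, the hard barrier is what reaches the wire: a request carrying
REQ_HARDBARRIER is sent with an ordered tag so the drive itself preserves
ordering, while a soft barrier never needs to be visible to the drive.
The same decision, restated as a hypothetical standalone helper
(MSG_ORDERED_TAG and MSG_SIMPLE_TAG are the tag message codes used above):

/* sketch restating the choice made in scsi_populate_tag_msg() above */
static inline char scsi_barrier_tag_msg(struct request *req)
{
	/* a hard barrier must not be reordered by the drive, so it
	 * gets an ordered tag; anything else can use a simple tag */
	return (req->flags & REQ_HARDBARRIER) ? MSG_ORDERED_TAG
					      : MSG_SIMPLE_TAG;
}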
@@ -110,7 +110,8 @@ struct request {
 enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_RW_AHEAD,		/* READA */
-	__REQ_BARRIER,		/* may not be passed */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_CMD,		/* is a regular fs rw request */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
@@ -134,7 +135,8 @@ enum rq_flag_bits {
 #define REQ_RW		(1 << __REQ_RW)
 #define REQ_RW_AHEAD	(1 << __REQ_RW_AHEAD)
-#define REQ_BARRIER	(1 << __REQ_BARRIER)
+#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
+#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
 #define REQ_CMD		(1 << __REQ_CMD)
 #define REQ_NOMERGE	(1 << __REQ_NOMERGE)
 #define REQ_STARTED	(1 << __REQ_STARTED)
@@ -275,9 +277,10 @@ struct request_queue
  * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
  * it already be started by driver.
  */
+#define RQ_NOMERGE_FLAGS	\
+	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->flags & (REQ_NOMERGE | REQ_STARTED | REQ_BARRIER)) \
-	&& ((rq)->flags & REQ_CMD))
+	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

 /*
  * noop, requests are automagically marked as active/inactive by I/O
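
Collecting the no-merge conditions into RQ_NOMERGE_FLAGS means both
barrier bits veto merging in one place, while blk_fs_request(), which
tests REQ_CMD, keeps non-fs requests out as before.  A hypothetical
merge-path guard built on the macro might look like:

/*
 * Sketch only: a hypothetical caller of rq_mergeable(), assuming the
 * macros defined in the hunk above; not code from this patch.
 */
static int example_try_merge(struct request *rq)
{
	/* REQ_NOMERGE, REQ_STARTED and both barrier bits all veto */
	if (!rq_mergeable(rq))
		return 0;

	/* contiguity and size checks would follow here */
	return 1;
}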