Commit 31cf92f3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three patches that should go into this release.

  Two of them are from Paolo and fix up some corner cases with BFQ, and
  the last patch is from Ming and fixes up a potential usage count
  imbalance regression due to the recent NOWAIT work"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: don't leak preempt counter/q_usage_counter when allocating rq failed
  block, bfq: consider also in_service_entity to state whether an entity is active
  block, bfq: reset in_service_entity if it becomes idle
parents d555eb6b 1ad43c00
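
The "usage count imbalance" mentioned above refers to references taken on the request queue (blk_queue_enter_live() bumps q_usage_counter) that were not dropped when request allocation failed. Before the diff, a minimal user-space sketch of the pairing rule the fix restores; queue_enter(), queue_exit(), get_request() and usage_counter are illustrative stand-ins, not kernel code, and the only point is that a failing path must release exactly what it acquired:

#include <stdbool.h>
#include <stdio.h>

static int usage_counter;                /* stands in for q_usage_counter */

static void queue_enter(void) { usage_counter++; }   /* acquire a reference */
static void queue_exit(void)  { usage_counter--; }   /* drop the reference  */

/* Returns false on failure; must leave usage_counter balanced either way. */
static bool get_request(bool tag_available)
{
	queue_enter();                   /* acquired inside this function */

	if (!tag_available) {
		queue_exit();            /* release on the error path ...      */
		return false;            /* ... so the caller sees no imbalance */
	}
	return true;                     /* success: the caller releases later */
}

int main(void)
{
	get_request(false);              /* simulate tag allocation failure */
	printf("after failed allocation:    usage_counter = %d (expect 0)\n",
	       usage_counter);

	if (get_request(true))           /* success path: caller drops the ref */
		queue_exit();
	printf("after completed allocation: usage_counter = %d (expect 0)\n",
	       usage_counter);
	return 0;
}

Both printed counters come out as 0, which is the invariant the failing-allocation path in blk_mq_get_request() below re-establishes.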
@@ -71,17 +71,29 @@ struct bfq_service_tree {
  *
  * bfq_sched_data is the basic scheduler queue. It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup. @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
 	/* entity in service */
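The sketch below is a simplified, stand-alone model of the relationship the rewritten comment above describes, not the kernel's definitions: a sched_data-like node remembers both the entity currently in service and the precomputed best candidate for next service, so that when the in-service entity expires and becomes idle the next one can be picked without walking the service trees again. All type and field names other than in_service_entity and next_in_service are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct entity {
	const char *name;
	unsigned long long finish;        /* WF2Q+-style virtual finish time */
};

struct sched_data {
	struct entity *in_service_entity; /* entity currently being served   */
	struct entity *next_in_service;   /* best candidate for next service */
};

/* Called when the in-service entity expires and becomes idle: the
 * precomputed candidate is promoted without rescanning the active trees. */
static struct entity *expire_and_pick_next(struct sched_data *sd)
{
	sd->in_service_entity = sd->next_in_service;
	sd->next_in_service = NULL;       /* recomputed later by the scheduler */
	return sd->in_service_entity;
}

int main(void)
{
	struct entity a = { "queue-A", 100 }, b = { "queue-B", 140 };
	struct sched_data sd = { .in_service_entity = &a, .next_in_service = &b };

	printf("now serving:  %s\n", sd.in_service_entity->name);
	printf("next service: %s\n", expire_and_pick_next(&sd)->name);
	return 0;
}
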
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
+	struct blk_mq_ctx *local_ctx = NULL;

 	blk_queue_enter_live(q);
 	data->q = q;
 	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
+		data->ctx = local_ctx = blk_mq_get_ctx(q);
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 	if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,

 	tag = blk_mq_get_tag(data);
 	if (tag == BLK_MQ_TAG_FAIL) {
+		if (local_ctx) {
+			blk_mq_put_ctx(local_ctx);
+			data->ctx = NULL;
+		}
 		blk_queue_exit(q);
 		return NULL;
 	}
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,

 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

-	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);

+	blk_mq_put_ctx(alloc_data.ctx);
+	blk_queue_exit(q);
+
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,

 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);

-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);

+	blk_queue_exit(q);
+
 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
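
The new local_ctx variable in blk_mq_get_request() encodes an ownership rule: a ctx acquired inside the function (along with the preempt count from the commit subject, taken by blk_mq_get_ctx()) is released there on failure, while a ctx passed in by the caller stays the caller's responsibility; that is also why blk_mq_alloc_request() above now drops its references only after the NULL check. A stand-alone sketch of that pattern follows; get_ctx(), put_ctx() and struct ctx are illustrative stand-ins, not the blk-mq API.

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct ctx { int cpu; };

static struct ctx *get_ctx(void)          { return calloc(1, sizeof(struct ctx)); }
static void        put_ctx(struct ctx *c) { free(c); }

/* Returns 0 on success, -1 on a (simulated) tag allocation failure. */
static int get_request(struct ctx **ctxp, bool tag_available)
{
	struct ctx *local_ctx = NULL;

	if (!*ctxp)                        /* only acquire if the caller did not */
		*ctxp = local_ctx = get_ctx();

	if (!tag_available) {
		if (local_ctx) {           /* release only what we acquired here */
			put_ctx(local_ctx);
			*ctxp = NULL;
		}
		return -1;
	}
	return 0;                          /* success: the ctx now belongs to the caller */
}

int main(void)
{
	struct ctx *ctx = NULL;

	/* failure path: the ctx acquired inside get_request() is also released there */
	if (get_request(&ctx, false) == 0)
		put_ctx(ctx);
	printf("after failure, ctx is %s\n", ctx ? "still held" : "NULL");

	/* success path: the caller releases the ctx it now owns */
	ctx = NULL;
	if (get_request(&ctx, true) == 0)
		put_ctx(ctx);
	return 0;
}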