Commit 31cf92f3 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three patches that should go into this release.

  Two of them are from Paolo and fix up some corner cases with BFQ, and
  the last patch is from Ming and fixes up a potential usage count
  imbalance regression due to the recent NOWAIT work"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: don't leak preempt counter/q_usage_counter when allocating rq failed
  block, bfq: consider also in_service_entity to state whether an entity is active
  block, bfq: reset in_service_entity if it becomes idle
parents d555eb6b 1ad43c00
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -71,17 +71,29 @@ struct bfq_service_tree {
  *
  * bfq_sched_data is the basic scheduler queue.  It supports three
  * ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup.  @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
  *
  * The supported ioprio_classes are the same as in CFQ, in descending
  * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
  * Requests from higher priority queues are served before all the
  * requests from lower priority queues; among requests of the same
  * queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no change in the schedule occurs
+ * before the current in-service entity expires, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed
+ * to by in_service_entity is not a queue, then the in-service child
+ * entity of the entity pointed to by in_service_entity becomes idle
+ * on expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such a best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
  */
 struct bfq_sched_data {
 	/* entity in service */
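The "not yet exploited" optimization described in the new comment reduces the next-service decision to a timestamp comparison. Below is a minimal userspace sketch of that idea, with a toy entity type whose start/finish fields stand in for BFQ's virtual timestamps; the eligibility check that B-WF2Q+ also performs against the tree's virtual time is omitted, and none of these names are kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for struct bfq_entity: just the virtual timestamps. */
struct entity {
	uint64_t start;		/* virtual start time */
	uint64_t finish;	/* virtual finish time */
};

/*
 * With next_in_service precomputed, deciding whether the in-service
 * entity is still the best choice becomes a single comparison of
 * virtual finish times (B-WF2Q+ serves the smallest one among
 * eligible entities), instead of a walk over the active trees.
 */
static bool in_service_still_best(const struct entity *in_service,
				  const struct entity *next_in_service)
{
	return in_service->finish <= next_in_service->finish;
}

int main(void)
{
	struct entity cur  = { .start = 10, .finish = 30 };
	struct entity next = { .start = 12, .finish = 25 };

	printf("keep current entity in service? %s\n",
	       in_service_still_best(&cur, &next) ? "yes" : "no");
	return 0;
}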
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
 /*
  * This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
  *
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to that definition, because entity is about
+ * to become the in-service queue. This function then returns true if
+ * entity is a queue.
  *
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
  */
 static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 {
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
 	bfqg = container_of(entity, struct bfq_group, entity);
 
+	/*
+	 * The field active_entities does not always contain the
+	 * actual number of active children entities: it happens to
+	 * not account for the in-service entity in case the latter is
+	 * removed from its active tree (which may get done after
+	 * invoking the function bfq_no_longer_next_in_service in
+	 * bfq_get_next_queue). Fortunately, here, i.e., while
+	 * bfq_no_longer_next_in_service is not yet completed in
+	 * bfq_get_next_queue, bfq_active_extract has not yet been
+	 * invoked, and thus active_entities still coincides with the
+	 * actual number of active entities.
+	 */
 	if (bfqg->active_entities == 1)
 		return true;
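Read literally, the predicate plus the caveat above boil down to very little code once the bfq types are stripped away. Here is a compilable toy model, assuming a simple is_queue flag and a per-group count of active children; these are illustrative names, not the kernel structures.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a bfq entity: a leaf queue or a group of children. */
struct toy_entity {
	bool is_queue;		/* bfq_queue (leaf) vs. bfq_group */
	int active_entities;	/* meaningful for groups only */
};

/*
 * Mirrors the documented logic: a queue about to go in service can
 * never be next_in_service; a group stops being a candidate only if
 * the child being put in service is its sole active child.
 */
static bool no_longer_next_in_service(const struct toy_entity *e)
{
	if (e->is_queue)
		return true;
	return e->active_entities == 1;
}

int main(void)
{
	struct toy_entity queue      = { .is_queue = true };
	struct toy_entity busy_group = { .active_entities = 3 };
	struct toy_entity lone_group = { .active_entities = 1 };

	printf("queue: %d, busy group: %d, lone group: %d\n",
	       no_longer_next_in_service(&queue),
	       no_longer_next_in_service(&busy_group),
	       no_longer_next_in_service(&lone_group));
	return 0;
}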
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
  * one of its children receives a new request.
  *
  * Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, after possibly extracting it
  * from its idle tree.
  */
 static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
 		entity->start = entity->finish;
 		/*
 		 * In addition, if the entity had more than one child
-		 * when set in service, then was not extracted from
+		 * when set in service, then it was not extracted from
 		 * the active tree. This implies that the position of
 		 * the entity in the active tree may need to be
 		 * changed now, because we have just updated the start
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
 		 * time in a moment (the requeueing is then, more
 		 * precisely, a repositioning in this case). To
 		 * implement this repositioning, we: 1) dequeue the
-		 * entity here, 2) update the finish time and
-		 * requeue the entity according to the new
-		 * timestamps below.
+		 * entity here, 2) update the finish time and requeue
+		 * the entity according to the new timestamps below.
 		 */
 		if (entity->tree)
 			bfq_active_extract(st, entity);
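The dequeue, update, requeue dance described in this comment is the standard way to change a key while an element sits in an ordered structure (an rbtree of timestamps, in BFQ's case): the key must never change in place, or the structure's ordering invariant breaks. A small self-contained illustration of the same pattern on a sorted array instead of an rbtree:

#include <stdio.h>

/*
 * Repositioning pattern from __bfq_requeue_entity, on a sorted array:
 * 1) extract the element, 2) update its key, 3) re-insert it at the
 * position the new key dictates, so the container stays sorted.
 */
static void reposition(int *keys, int n, int idx, int new_key)
{
	int i;

	/* 1) extract: close the gap left by keys[idx] */
	for (i = idx; i < n - 1; i++)
		keys[i] = keys[i + 1];

	/* 2) + 3) re-insert new_key, keeping the array sorted */
	i = n - 2;
	while (i >= 0 && keys[i] > new_key) {
		keys[i + 1] = keys[i];
		i--;
	}
	keys[i + 1] = new_key;
}

int main(void)
{
	int keys[] = { 10, 20, 30, 40 };

	reposition(keys, 4, 1, 35);	/* key 20 becomes 35 */
	for (int i = 0; i < 4; i++)
		printf("%d ", keys[i]);	/* prints: 10 30 35 40 */
	printf("\n");
	return 0;
}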
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 /**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- *			 and activate, requeue or reposition all ancestors
- *			 for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ *				 bfq_queue, and activate, requeue or reposition
+ *				 all ancestors for which such an update becomes
+ *				 necessary.
  * @entity: the entity to activate.
  * @non_blocking_wait_rq: true if this entity was waiting for a request
  * @requeue: true if this is a requeue, which implies that bfqq is
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *			idle tree.
  *
- * Deactivates an entity, independently from its previous state. Must
+ * Deactivates an entity, independently of its previous state. Must
  * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
  * tree.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 	st = bfq_entity_service_tree(entity);
 	is_in_service = entity == sd->in_service_entity;
 
-	if (is_in_service)
+	if (is_in_service) {
 		bfq_calc_finish(entity, entity->service);
+		sd->in_service_entity = NULL;
+	}
 
 	if (entity->tree == &st->active)
 		bfq_active_extract(st, entity);
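The two added lines are the heart of the patch "block, bfq: reset in_service_entity if it becomes idle": once the in-service entity is deactivated, the parent scheduler data must forget it, or later decisions compare against a pointer to an entity that is in fact idle. A toy model of the hazard being fixed, with simplified types rather than the kernel's:

#include <assert.h>
#include <stddef.h>

struct entity { int weight; };

struct sched_data {
	struct entity *in_service_entity;
};

/*
 * Deactivating the in-service entity must also clear the parent's
 * in_service_entity pointer; otherwise the parent keeps treating an
 * idle entity as busy.
 */
static void toy_deactivate(struct sched_data *sd, struct entity *e)
{
	if (sd->in_service_entity == e)
		sd->in_service_entity = NULL;	/* the added reset */
	/* ...extraction from the active/idle trees would follow... */
}

int main(void)
{
	struct entity e = { .weight = 1 };
	struct sched_data sd = { .in_service_entity = &e };

	toy_deactivate(&sd, &e);
	assert(sd.in_service_entity == NULL);
	return 0;
}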
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 /**
  * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
  * @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
  */
 static void bfq_deactivate_entity(struct bfq_entity *entity,
 				  bool ins_into_idle_tree,
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
 			 */
 			bfq_update_next_in_service(sd, NULL);
 
-			if (sd->next_in_service)
+			if (sd->next_in_service || sd->in_service_entity) {
 				/*
-				 * The parent entity is still backlogged,
-				 * because next_in_service is not NULL. So, no
-				 * further upwards deactivation must be
-				 * performed. Yet, next_in_service has
-				 * changed. Then the schedule does need to be
-				 * updated upwards.
+				 * The parent entity is still active, because
+				 * either next_in_service or in_service_entity
+				 * is not NULL. So, no further upwards
+				 * deactivation must be performed. Yet,
+				 * next_in_service has changed. Then the
+				 * schedule does need to be updated upwards.
+				 *
+				 * NOTE If in_service_entity is not NULL, then
+				 * next_in_service may happen to be NULL,
+				 * although the parent entity is evidently
+				 * active. This happens if 1) the entity
+				 * pointed to by in_service_entity is the only
+				 * active entity in the parent entity, and 2)
+				 * according to the definition of
+				 * next_in_service, the in_service_entity
+				 * cannot be considered as next_in_service.
+				 * See the comments on the definition of
+				 * next_in_service for details.
 				 */
 				break;
+			}
 
 			/*
 			 * If we get here, then the parent is no more
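The enlarged stop condition is what the patch "block, bfq: consider also in_service_entity to state whether an entity is active" is named after. A compilable sketch of the upward walk with toy scheduler-data nodes (illustrative only, not the kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_sd {
	struct toy_sd *parent;
	void *in_service_entity;
	void *next_in_service;
};

/* After the patch: a parent counts as active if either field is set. */
static bool parent_still_active(const struct toy_sd *sd)
{
	return sd->next_in_service || sd->in_service_entity;
}

/*
 * Reduced shape of the upward walk in bfq_deactivate_entity: stop
 * deactivating at the first still-active ancestor. Testing only
 * next_in_service (the old code) would wrongly deactivate an ancestor
 * whose sole active child is the one currently in service.
 */
static int levels_deactivated(struct toy_sd *sd)
{
	int n = 0;

	while (sd && !parent_still_active(sd)) {
		n++;			/* this ancestor goes idle too */
		sd = sd->parent;
	}
	return n;
}

int main(void)
{
	struct toy_sd grandparent = { NULL, NULL, NULL };
	struct toy_sd parent = { &grandparent, "in-service child", NULL };

	/* the old next_in_service-only test would deactivate both
	 * levels; the fixed test stops at the parent immediately */
	printf("levels deactivated: %d (expect 0)\n",
	       levels_deactivated(&parent));
	return 0;
}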
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 		/*
 		 * If entity is no longer a candidate for next
-		 * service, then we extract it from its active tree,
-		 * for the following reason. To further boost the
-		 * throughput in some special case, BFQ needs to know
-		 * which is the next candidate entity to serve, while
-		 * there is already an entity in service. In this
-		 * respect, to make it easy to compute/update the next
-		 * candidate entity to serve after the current
-		 * candidate has been set in service, there is a case
-		 * where it is necessary to extract the current
-		 * candidate from its service tree. Such a case is
-		 * when the entity just set in service cannot be also
-		 * a candidate for next service. Details about when
-		 * this conditions holds are reported in the comments
-		 * on the function bfq_no_longer_next_in_service()
-		 * invoked below.
+		 * service, then it must be extracted from its active
+		 * tree, so as to make sure that it won't be
+		 * considered when computing next_in_service. See the
+		 * comments on the function
+		 * bfq_no_longer_next_in_service() for details.
 		 */
 		if (bfq_no_longer_next_in_service(entity))
 			bfq_active_extract(bfq_entity_service_tree(entity),
 					   entity);
 
 		/*
-		 * For the same reason why we may have just extracted
-		 * entity from its active tree, we may need to update
-		 * next_in_service for the sched_data of entity too,
-		 * regardless of whether entity has been extracted.
-		 * In fact, even if entity has not been extracted, a
-		 * descendant entity may get extracted. Such an event
-		 * would cause a change in next_in_service for the
-		 * level of the descendant entity, and thus possibly
-		 * back to upper levels.
+		 * Even if entity is not to be extracted according to
+		 * the above check, a descendant entity may get
+		 * extracted in one of the next iterations of this
+		 * loop. Such an event could cause a change in
+		 * next_in_service for the level of the descendant
+		 * entity, and thus possibly back to this level.
 		 *
-		 * We cannot perform the resulting needed update
-		 * before the end of this loop, because, to know which
-		 * is the correct next-to-serve candidate entity for
-		 * each level, we need first to find the leaf entity
-		 * to set in service. In fact, only after we know
-		 * which is the next-to-serve leaf entity, we can
-		 * discover whether the parent entity of the leaf
-		 * entity becomes the next-to-serve, and so on.
+		 * However, we cannot perform the resulting needed
+		 * update of next_in_service for this level before the
+		 * end of the whole loop, because, to know which is
+		 * the correct next-to-serve candidate entity for each
+		 * level, we need first to find the leaf entity to set
+		 * in service. In fact, only after we know which is
+		 * the next-to-serve leaf entity, we can discover
+		 * whether the parent entity of the leaf entity
+		 * becomes the next-to-serve, and so on.
 		 */
 	}
 
 	bfqq = bfq_entity_to_bfqq(entity);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
+	struct blk_mq_ctx *local_ctx = NULL;
 
 	blk_queue_enter_live(q);
 	data->q = q;
 	if (likely(!data->ctx))
-		data->ctx = blk_mq_get_ctx(q);
+		data->ctx = local_ctx = blk_mq_get_ctx(q);
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 	if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	tag = blk_mq_get_tag(data);
 	if (tag == BLK_MQ_TAG_FAIL) {
+		if (local_ctx) {
+			blk_mq_put_ctx(local_ctx);
+			data->ctx = NULL;
+		}
 		blk_queue_exit(q);
 		return NULL;
 	}
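The local_ctx bookkeeping implements a general rule: an error path must release exactly the references the function itself acquired, and nothing it was handed by the caller. Since blk_mq_get_ctx() also disables preemption, skipping the put is what leaked the preempt counter named in the patch title. Correspondingly, the two caller hunks below now drop their references only on the success path, because a failed blk_mq_get_request() has already cleaned up after itself. A userspace model of the pattern, where get_ctx/put_ctx are stand-ins rather than the kernel functions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int preempt_count;	/* models the counter the get/put pair touches */

struct ctx { int cpu; };

static struct ctx *get_ctx(void)
{
	static struct ctx c;

	preempt_count++;
	return &c;
}

static void put_ctx(struct ctx *c)
{
	(void)c;
	preempt_count--;
}

/*
 * Pattern of the fix: remember whether *this* function acquired the
 * ctx (local_ctx), and on failure release only what it acquired,
 * leaving a caller-provided ctx alone.
 */
static bool get_request(struct ctx **data_ctx, bool tag_ok)
{
	struct ctx *local_ctx = NULL;

	if (!*data_ctx)
		*data_ctx = local_ctx = get_ctx();

	if (!tag_ok) {			/* tag allocation failed */
		if (local_ctx) {	/* undo only our own acquisition */
			put_ctx(local_ctx);
			*data_ctx = NULL;
		}
		return false;
	}
	return true;	/* success: the caller releases later */
}

int main(void)
{
	struct ctx *ctx = NULL;

	get_request(&ctx, false);
	printf("preempt_count after failed alloc: %d (must be 0)\n",
	       preempt_count);
	return 0;
}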
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
+	blk_mq_put_ctx(alloc_data.ctx);
+	blk_queue_exit(q);
+
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
-	blk_queue_exit(q);
-
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
+	blk_queue_exit(q);
+
 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);