Commit 73d58118 authored by Paolo Valente, committed by Jens Axboe

block, bfq: consider also ioprio classes in symmetry detection

In asymmetric scenarios, i.e., when some bfq_queue or bfq_group needs to
be guaranteed a different bandwidth than other bfq_queues or bfq_groups,
these service guarantees can be provided only by plugging I/O dispatch,
completely or partially, when the queue in service remains temporarily
empty. A case where asymmetry is particularly strong is when some active
bfq_queues belong to a higher-priority class than some other active
bfq_queues. Unfortunately, this important case is not considered at all
in the code for detecting asymmetric scenarios. This commit adds the
missing logic.
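The new per-class state needed for this detection is small: three counters of busy bfq_queues, one per I/O-priority class (RT, BE, IDLE), with the scenario deemed class-asymmetric as soon as two of the counters are nonzero. A minimal standalone sketch of that check, with hypothetical names (busy[] stands in for the patch's bfqd->busy_queues[]):

static bool multiple_classes_busy(const unsigned int busy[3])
{
	/* busy[0], busy[1], busy[2]: busy queues in RT, BE, IDLE */
	return (busy[0] && busy[1]) ||
	       (busy[0] && busy[2]) ||
	       (busy[1] && busy[2]);
}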
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 03e565e4
@@ -623,26 +623,6 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 		bfqq->pos_root = NULL;
 }
 
-/*
- * Tell whether there are active queues with different weights or
- * active groups.
- */
-static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
-{
-	/*
-	 * For queue weights to differ, queue_weights_tree must contain
-	 * at least two nodes.
-	 */
-	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
-		(bfqd->queue_weights_tree.rb_node->rb_left ||
-		 bfqd->queue_weights_tree.rb_node->rb_right)
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
-	       ) ||
-	       (bfqd->num_groups_with_pending_reqs > 0
-#endif
-	       );
-}
-
 /*
  * The following function returns true if every queue must receive the
  * same share of the throughput (this condition is used when deciding
@@ -651,25 +631,48 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
  *
  * Such a scenario occurs when:
  * 1) all active queues have the same weight,
- * 2) all active groups at the same level in the groups tree have the same
- *    weight,
- * 3) all active groups at the same level in the groups tree have the same
+ * 2) all active queues belong to the same I/O-priority class,
+ * 3) all active groups at the same level in the groups tree have the same
+ *    weight,
+ * 4) all active groups at the same level in the groups tree have the same
  *    number of children.
  *
  * Unfortunately, keeping the necessary state for evaluating exactly
  * the last two symmetry sub-conditions above would be quite complex
  * and time consuming. Therefore this function evaluates, instead,
- * only the following stronger two sub-conditions, for which it is
+ * only the following stronger three sub-conditions, for which it is
  * much easier to maintain the needed state:
  * 1) all active queues have the same weight,
- * 2) there are no active groups.
+ * 2) all active queues belong to the same I/O-priority class,
+ * 3) there are no active groups.
  * In particular, the last condition is always true if hierarchical
  * support or the cgroups interface are not enabled, thus no state
  * needs to be maintained in this case.
  */
 static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
 {
-	return !bfq_varied_queue_weights_or_active_groups(bfqd);
+	/*
+	 * For queue weights to differ, queue_weights_tree must contain
+	 * at least two nodes.
+	 */
+	bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
+		(bfqd->queue_weights_tree.rb_node->rb_left ||
+		 bfqd->queue_weights_tree.rb_node->rb_right);
+
+	bool multiple_classes_busy =
+		(bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
+		(bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
+		(bfqd->busy_queues[1] && bfqd->busy_queues[2]);
+
+	/*
+	 * The scenario is symmetric only if none of the above
+	 * asymmetry conditions holds and, with group scheduling
+	 * enabled, no group has pending requests.
+	 */
+	return !(varied_queue_weights || multiple_classes_busy
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+	       || bfqd->num_groups_with_pending_reqs > 0
+#endif
+	       );
 }
 
 /*
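A note on the pairwise test above: with exactly three classes, OR-ing the three pairs of counters is equivalent to counting how many classes have busy queues and checking whether that count exceeds one. An equivalent formulation, shown only for illustration (the patch itself uses the pairwise form):

	unsigned int classes_busy = (bfqd->busy_queues[0] > 0) +
				    (bfqd->busy_queues[1] > 0) +
				    (bfqd->busy_queues[2] > 0);
	/* asymmetric, class-wise, iff at least two classes have busy queues */
	bool multiple_classes_busy = classes_busy > 1;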
@@ -728,15 +731,14 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	/*
 	 * In the unlucky event of an allocation failure, we just
 	 * exit. This will cause the weight of queue to not be
-	 * considered in bfq_varied_queue_weights_or_active_groups,
-	 * which, in its turn, causes the scenario to be deemed
-	 * wrongly symmetric in case bfqq's weight would have been
-	 * the only weight making the scenario asymmetric. On the
-	 * bright side, no unbalance will however occur when bfqq
-	 * becomes inactive again (the invocation of this function
-	 * is triggered by an activation of queue). In fact,
-	 * bfq_weights_tree_remove does nothing if
-	 * !bfqq->weight_counter.
+	 * considered in bfq_symmetric_scenario, which, in its turn,
+	 * causes the scenario to be deemed wrongly symmetric in case
+	 * bfqq's weight would have been the only weight making the
+	 * scenario asymmetric. On the bright side, no unbalance will
+	 * however occur when bfqq becomes inactive again (the
+	 * invocation of this function is triggered by an activation
+	 * of queue). In fact, bfq_weights_tree_remove does nothing
+	 * if !bfqq->weight_counter.
 	 */
 	if (unlikely(!bfqq->weight_counter))
 		return;
@@ -2227,7 +2229,7 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 		return NULL;
 
 	/* If there is only one backlogged queue, don't search. */
-	if (bfqd->busy_queues == 1)
+	if (bfq_tot_busy_queues(bfqd) == 1)
 		return NULL;
 
 	in_service_bfqq = bfqd->in_service_queue;
@@ -3681,7 +3683,8 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
 	 * the requests already queued in the device have been served.
 	 */
 	asymmetric_scenario = (bfqq->wr_coeff > 1 &&
-			       bfqd->wr_busy_queues < bfqd->busy_queues) ||
+			       bfqd->wr_busy_queues <
+			       bfq_tot_busy_queues(bfqd)) ||
 		!bfq_symmetric_scenario(bfqd);
 
 	/*
@@ -3960,7 +3963,7 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
 	 * belongs to CLASS_IDLE and other queues are waiting for
 	 * service.
 	 */
-	if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq)))
+	if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
 		goto return_rq;
 
 	bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
@@ -3978,7 +3981,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
 	 * most a call to dispatch for nothing
 	 */
 	return !list_empty_careful(&bfqd->dispatch) ||
-		bfqd->busy_queues > 0;
+		bfq_tot_busy_queues(bfqd) > 0;
 }
 
 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
@@ -4032,9 +4035,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 		goto start_rq;
 	}
 
-	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+	bfq_log(bfqd, "dispatch requests: %d busy queues",
+		bfq_tot_busy_queues(bfqd));
 
-	if (bfqd->busy_queues == 0)
+	if (bfq_tot_busy_queues(bfqd) == 0)
 		goto exit;
 
 	/*
...
@@ -501,10 +501,11 @@ struct bfq_data {
 	unsigned int num_groups_with_pending_reqs;
 
 	/*
-	 * Number of bfq_queues containing requests (including the
-	 * queue in service, even if it is idling).
+	 * Per-class (RT, BE, IDLE) number of bfq_queues containing
+	 * requests (including the queue in service, even if it is
+	 * idling).
 	 */
-	int busy_queues;
+	unsigned int busy_queues[3];
 	/* number of weight-raised busy @bfq_queues */
 	int wr_busy_queues;
 	/* number of queued requests */
@@ -974,6 +975,7 @@ extern struct blkcg_policy blkcg_policy_bfq;
 struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
 struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
+unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
 struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
 struct bfq_entity *bfq_entity_of(struct rb_node *node);
 unsigned short bfq_ioprio_to_weight(int ioprio);
...
@@ -44,6 +44,12 @@ static unsigned int bfq_class_idx(struct bfq_entity *entity)
 		BFQ_DEFAULT_GRP_CLASS - 1;
 }
 
+unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
+{
+	return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
+	       bfqd->busy_queues[2];
+}
+
 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
 						 bool expiration);
@@ -1513,7 +1519,7 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 	struct bfq_sched_data *sd;
 	struct bfq_queue *bfqq;
 
-	if (bfqd->busy_queues == 0)
+	if (bfq_tot_busy_queues(bfqd) == 0)
 		return NULL;
 
 	/*
@@ -1665,7 +1671,7 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	bfq_clear_bfqq_busy(bfqq);
 
-	bfqd->busy_queues--;
+	bfqd->busy_queues[bfqq->ioprio_class - 1]--;
 
 	if (!bfqq->dispatched)
 		bfq_weights_tree_remove(bfqd, bfqq);
@@ -1688,7 +1694,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_activate_bfqq(bfqd, bfqq);
 
 	bfq_mark_bfqq_busy(bfqq);
-	bfqd->busy_queues++;
+	bfqd->busy_queues[bfqq->ioprio_class - 1]++;
 
 	if (!bfqq->dispatched)
 		if (bfqq->wr_coeff == 1)
...
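For reference on the bfqq->ioprio_class - 1 indexing in the last two hunks: the kernel numbers its I/O-priority classes IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2 and IOPRIO_CLASS_IDLE = 3 (include/linux/ioprio.h), so subtracting one maps a queue's class onto busy_queues[0..2]. A hypothetical helper, not part of the patch, that makes the mapping explicit:

static inline unsigned int bfq_class_to_index(unsigned short ioprio_class)
{
	/* IOPRIO_CLASS_RT = 1, IOPRIO_CLASS_BE = 2, IOPRIO_CLASS_IDLE = 3 */
	return ioprio_class - 1;	/* 0: RT, 1: BE, 2: IDLE */
}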