Commit 6fa3e8d3 authored by Paolo Valente, committed by Jens Axboe

block, bfq: remove all get and put of I/O contexts

When a bfq queue is set in service and when it is merged, a reference
to the I/O context associated with the queue is taken. This reference
is then released when the queue is deselected from service or
split. More precisely, the release of the reference is postponed to
when the scheduler lock is released, to avoid nesting between the
scheduler and the I/O-context lock. In fact, such nesting would lead
to deadlocks, because of other code paths that take the same locks in
the opposite order. This postponing of I/O-context releases, however,
complicates the code.
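
For reference, this is the deferred-put helper being removed (see the
diff below), reproduced here with explanatory comments added: the
io_context reference is stashed in bfqd->ioc_to_put while the scheduler
lock is held, and put_io_context() is invoked only after that lock has
been dropped.

static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
{
	/* detach the pending ioc reference while still holding bfqd->lock */
	struct io_context *ioc_to_put = bfqd->ioc_to_put;

	bfqd->ioc_to_put = NULL;
	spin_unlock_irq(&bfqd->lock);

	/*
	 * Only now, with bfqd->lock released, may put_io_context() be
	 * called: it can take ioc->lock, which must never nest inside
	 * the scheduler lock.
	 */
	if (ioc_to_put)
		put_io_context(ioc_to_put);
}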

This commit addresses this issue by modifying the involved operations
so that they no longer need to take the above I/O-context references.
It then removes all gets and puts of these references.
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent e1b2324d
@@ -538,8 +538,6 @@ struct bfq_data {
 
 	/* bfq_queue in service */
 	struct bfq_queue *in_service_queue;
-	/* bfq_io_cq (bic) associated with the @in_service_queue */
-	struct bfq_io_cq *in_service_bic;
 
 	/* on-disk position of the last served request */
 	sector_t last_position;
@@ -704,15 +702,6 @@ struct bfq_data {
 	struct bfq_io_cq *bio_bic;
 	/* bfqq associated with the task issuing current bio for merging */
 	struct bfq_queue *bio_bfqq;
-
-	/*
-	 * io context to put right after bfqd->lock is released. This
-	 * filed is used to perform put_io_context, when needed, to
-	 * after the scheduler lock has been released, and thus
-	 * prevent an ioc->lock from being possibly taken while the
-	 * scheduler lock is being held.
-	 */
-	struct io_context *ioc_to_put;
 };
 
 enum bfqq_state_flags {
@@ -1148,34 +1137,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd)
 	}
 }
 
-/*
- * Next two functions release bfqd->lock and put the io context
- * pointed by bfqd->ioc_to_put. This delayed put is used to not risk
- * to take an ioc->lock while the scheduler lock is being held.
- */
-static void bfq_unlock_put_ioc(struct bfq_data *bfqd)
-{
-	struct io_context *ioc_to_put = bfqd->ioc_to_put;
-
-	bfqd->ioc_to_put = NULL;
-	spin_unlock_irq(&bfqd->lock);
-
-	if (ioc_to_put)
-		put_io_context(ioc_to_put);
-}
-
-static void bfq_unlock_put_ioc_restore(struct bfq_data *bfqd,
-				       unsigned long flags)
-{
-	struct io_context *ioc_to_put = bfqd->ioc_to_put;
-
-	bfqd->ioc_to_put = NULL;
-	spin_unlock_irqrestore(&bfqd->lock, flags);
-
-	if (ioc_to_put)
-		put_io_context(ioc_to_put);
-}
 
 /**
  * bfq_gt - compare two timestamps.
  * @a: first ts.
@@ -2684,18 +2645,6 @@ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 	struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
 	struct bfq_entity *entity = in_serv_entity;
 
-	if (bfqd->in_service_bic) {
-		/*
-		 * Schedule the release of a reference to
-		 * bfqd->in_service_bic->icq.ioc to right after the
-		 * scheduler lock is released. This ioc is not
-		 * released immediately, to not risk to possibly take
-		 * an ioc->lock while holding the scheduler lock.
-		 */
-		bfqd->ioc_to_put = bfqd->in_service_bic->icq.ioc;
-		bfqd->in_service_bic = NULL;
-	}
-
 	bfq_clear_bfqq_wait_request(in_serv_bfqq);
 	hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
 	bfqd->in_service_queue = NULL;
@@ -3495,7 +3444,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
 		__bfq_deactivate_entity(entity, false);
 	bfq_put_async_queues(bfqd, bfqg);
-	bfq_unlock_put_ioc_restore(bfqd, flags);
+	spin_unlock_irqrestore(&bfqd->lock, flags);
 
 	/*
 	 * @blkg is going offline and will be ignored by
 	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
@@ -5472,20 +5421,18 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * first time that the requests of some process are redirected to
 	 * it.
 	 *
-	 * We redirect bfqq to new_bfqq and not the opposite, because we
-	 * are in the context of the process owning bfqq, hence we have
-	 * the io_cq of this process. So we can immediately configure this
-	 * io_cq to redirect the requests of the process to new_bfqq.
+	 * We redirect bfqq to new_bfqq and not the opposite, because
+	 * we are in the context of the process owning bfqq, thus we
+	 * have the io_cq of this process. So we can immediately
+	 * configure this io_cq to redirect the requests of the
+	 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
+	 * not available any more (new_bfqq->bic == NULL).
 	 *
-	 * NOTE, even if new_bfqq coincides with the in-service queue, the
-	 * io_cq of new_bfqq is not available, because, if the in-service
-	 * queue is shared, bfqd->in_service_bic may not point to the
-	 * io_cq of the in-service queue.
-	 * Redirecting the requests of the process owning bfqq to the
-	 * currently in-service queue is in any case the best option, as
-	 * we feed the in-service queue with new requests close to the
-	 * last request served and, by doing so, hopefully increase the
-	 * throughput.
+	 * Anyway, even in case new_bfqq coincides with the in-service
+	 * queue, redirecting requests the in-service queue is the
+	 * best option, as we feed the in-service queue with new
+	 * requests close to the last request served and, by doing so,
+	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
 	new_bfqq->ref += process_refs;
@@ -5577,8 +5524,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	in_service_bfqq = bfqd->in_service_queue;
 
-	if (!in_service_bfqq || in_service_bfqq == bfqq ||
-	    !bfqd->in_service_bic || wr_from_too_long(in_service_bfqq) ||
+	if (!in_service_bfqq || in_service_bfqq == bfqq
+	    || wr_from_too_long(in_service_bfqq) ||
 	    unlikely(in_service_bfqq == &bfqd->oom_bfqq))
 		goto check_scheduled;
@@ -5629,16 +5576,6 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 	bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
 }
 
-static void bfq_get_bic_reference(struct bfq_queue *bfqq)
-{
-	/*
-	 * If bfqq->bic has a non-NULL value, the bic to which it belongs
-	 * is about to begin using a shared bfq_queue.
-	 */
-	if (bfqq->bic)
-		atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
-}
-
 static void
 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
@@ -5682,12 +5619,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
 		     bfqd->wr_busy_queues);
 
-	/*
-	 * Grab a reference to the bic, to prevent it from being destroyed
-	 * before being possibly touched by a bfq_split_bfqq().
-	 */
-	bfq_get_bic_reference(bfqq);
-	bfq_get_bic_reference(new_bfqq);
 	/*
 	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
 	 */
@@ -5853,14 +5784,8 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
 {
 	struct bfq_queue *bfqq = bfqd->in_service_queue;
-	struct bfq_io_cq *bic;
 	u32 sl;
 
-	/* Processes have exited, don't wait. */
-	bic = bfqd->in_service_bic;
-	if (!bic || atomic_read(&bic->icq.ioc->active_ref) == 0)
-		return;
-
 	bfq_mark_bfqq_wait_request(bfqq);
 
 	/*
@@ -7147,11 +7072,6 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
 	 */
 	bfq_update_wr_data(bfqd, bfqq);
 
-	if (!bfqd->in_service_bic) {
-		atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
-		bfqd->in_service_bic = RQ_BIC(rq);
-	}
-
 	/*
 	 * Expire bfqq, pretending that its budget expired, if bfqq
 	 * belongs to CLASS_IDLE and other queues are waiting for
@@ -7272,7 +7192,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	spin_lock_irq(&bfqd->lock);
 
 	rq = __bfq_dispatch_request(hctx);
-	bfq_unlock_put_ioc(bfqd);
+	spin_unlock_irq(&bfqd->lock);
 
 	return rq;
 }
@@ -7360,20 +7280,9 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
 			unsigned long flags;
 
 			spin_lock_irqsave(&bfqd->lock, flags);
-			/*
-			 * If the bic is using a shared queue, put the
-			 * reference taken on the io_context when the bic
-			 * started using a shared bfq_queue. This put cannot
-			 * make ioc->ref_count reach 0, then no ioc->lock
-			 * risks to be taken (leading to possible deadlock
-			 * scenarios).
-			 */
-			if (is_sync && bfq_bfqq_coop(bfqq))
-				put_io_context(bic->icq.ioc);
-
 			bfq_exit_bfqq(bfqd, bfqq);
 			bic_set_bfqq(bic, NULL, is_sync);
-			bfq_unlock_put_ioc_restore(bfqd, flags);
+			spin_unlock_irqrestore(&bfqd->lock, flags);
 		}
 	}
@@ -7808,7 +7717,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		}
 	}
 
-	bfq_unlock_put_ioc(bfqd);
+	spin_unlock_irq(&bfqd->lock);
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
@@ -7962,7 +7871,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
 		bfq_completed_request(bfqq, bfqd);
 		bfq_put_rq_priv_body(bfqq);
 
-		bfq_unlock_put_ioc_restore(bfqd, flags);
+		spin_unlock_irqrestore(&bfqd->lock, flags);
 	} else {
 		/*
 		 * Request rq may be still/already in the scheduler,
@@ -8055,6 +7964,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 	const int is_sync = rq_is_sync(rq);
 	struct bfq_queue *bfqq;
 	bool new_queue = false;
+	bool split = false;
 
 	spin_lock_irq(&bfqd->lock);
@@ -8078,14 +7988,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 				bic->saved_in_large_burst = true;
 
 			bfqq = bfq_split_bfqq(bic, bfqq);
-			/*
-			 * A reference to bic->icq.ioc needs to be
-			 * released after a queue split. Do not do it
-			 * immediately, to not risk to possibly take
-			 * an ioc->lock while holding the scheduler
-			 * lock.
-			 */
-			bfqd->ioc_to_put = bic->icq.ioc;
+			split = true;
 
 			if (!bfqq)
 				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
@@ -8110,7 +8013,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 	 */
 	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
 		bfqq->bic = bic;
-		if (bfqd->ioc_to_put) { /* if true, there has been a split */
+		if (split) {
 			/*
 			 * The queue has just been split from a shared
 			 * queue: restore the idle window and the
@@ -8123,7 +8026,7 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
 	if (unlikely(bfq_bfqq_just_created(bfqq)))
 		bfq_handle_burst(bfqd, bfqq);
 
-	bfq_unlock_put_ioc(bfqd);
+	spin_unlock_irq(&bfqd->lock);
 
 	return 0;
@@ -8168,7 +8071,7 @@ static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
 	bfq_bfqq_expire(bfqd, bfqq, true, reason);
 
 schedule_dispatch:
-	bfq_unlock_put_ioc_restore(bfqd, flags);
+	spin_unlock_irqrestore(&bfqd->lock, flags);
 	bfq_schedule_dispatch(bfqd);
 }