Commit 4acb8341 authored by Keith Busch, committed by Jens Axboe

sbitmap: fix batched wait_cnt accounting

Batched completions can clear multiple bits, but we're only decrementing
the wait_cnt by one each time. This can cause waiters to never be woken,
stalling IO. Use the batched count instead.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=215679
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20220909184022.1709476-1-kbusch@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c35227d4
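To make the accounting gap concrete, here is a minimal user-space sketch (illustrative names and values, not kernel code): with a wake batch of 8, a batched completion that clears 4 bits consumed only one wait_cnt credit under the old code, so the other 3 credits were lost and a waiter could stay asleep even though enough tags had in fact been freed.

	/* Toy model of the accounting bug (illustrative only, not kernel code). */
	#include <stdio.h>

	int main(void)
	{
		int wake_batch = 8;	/* wakeup fires when wait_cnt reaches 0 */
		int nr = 4;		/* bits cleared by one batched completion */
		int old_cnt = wake_batch;
		int new_cnt = wake_batch;

		old_cnt -= 1;					/* old: one decrement per call */
		new_cnt -= (nr < new_cnt) ? nr : new_cnt;	/* fixed: consume the whole batch */

		/* old: 7 credits remain after 4 tags freed; new: 4 remain */
		printf("old wait_cnt=%d, new wait_cnt=%d\n", old_cnt, new_cnt);
		return 0;
	}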
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		 * other allocations on previous queue won't be starved.
 		 */
 		if (bt != bt_prev)
-			sbitmap_queue_wake_up(bt_prev);
+			sbitmap_queue_wake_up(bt_prev, 1);
 
 		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -575,8 +575,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
 /**
  * sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
  * on a &struct sbitmap_queue.
  * @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
  */
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
 
 /**
  * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -599,24 +599,31 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 	return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq)
+static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
 {
 	struct sbq_wait_state *ws;
 	unsigned int wake_batch;
-	int wait_cnt;
+	int wait_cnt, cur, sub;
 	bool ret;
 
+	if (*nr <= 0)
+		return false;
+
 	ws = sbq_wake_ptr(sbq);
 	if (!ws)
 		return false;
 
-	wait_cnt = atomic_dec_return(&ws->wait_cnt);
-	/*
-	 * For concurrent callers of this, callers should call this function
-	 * again to wakeup a new batch on a different 'ws'.
-	 */
-	if (wait_cnt < 0)
-		return true;
+	cur = atomic_read(&ws->wait_cnt);
+	do {
+		/*
+		 * For concurrent callers of this, callers should call this
+		 * function again to wakeup a new batch on a different 'ws'.
+		 */
+		if (cur == 0)
+			return true;
+		sub = min(*nr, cur);
+		wait_cnt = cur - sub;
+	} while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
 
 	/*
 	 * If we decremented queue without waiters, retry to avoid lost
@@ -625,6 +632,8 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	if (wait_cnt > 0)
 		return !waitqueue_active(&ws->wait);
 
+	*nr -= sub;
+
 	/*
 	 * When wait_cnt == 0, we have to be particularly careful as we are
 	 * responsible to reset wait_cnt regardless whether we've actually
@@ -660,12 +669,12 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
 	sbq_index_atomic_inc(&sbq->wake_index);
 	atomic_set(&ws->wait_cnt, wake_batch);
 
-	return ret;
+	return ret || *nr;
 }
 
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-	while (__sbq_wake_up(sbq))
+	while (__sbq_wake_up(sbq, &nr))
 		;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
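The core of the fix is the read/compute/compare-exchange loop above, which consumes up to *nr credits without ever taking wait_cnt below zero. A user-space analogue using C11 atomics (the function name and shape are my own; a sketch of the pattern, not the kernel implementation):

	#include <stdatomic.h>

	/*
	 * Sketch of the atomic_try_cmpxchg() pattern used above: subtract up
	 * to *nr from wait_cnt, never underflowing, and report how much was
	 * actually consumed.
	 */
	static int consume_wait_cnt(atomic_int *wait_cnt, int *nr)
	{
		int cur = atomic_load(wait_cnt);
		int sub, next;

		do {
			if (cur == 0)	/* batch already claimed by another caller */
				return 0;
			sub = *nr < cur ? *nr : cur;
			next = cur - sub;
			/* on failure, cur is refreshed with the current value */
		} while (!atomic_compare_exchange_weak(wait_cnt, &cur, next));

		*nr -= sub;		/* leftover credits roll over to the next 'ws' */
		return sub;
	}

As in the patch, a caller keeps retrying until nr is exhausted, which is why __sbq_wake_up() now returns true (retry) whenever *nr is still positive.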
@@ -705,7 +714,7 @@ void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
 	if (mask)
 		atomic_long_andnot(mask, (atomic_long_t *) addr);
 
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq);
+	sbitmap_queue_wake_up(sbq, nr_tags);
 	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
 					tags[nr_tags - 1] - offset);
 }
@@ -733,7 +742,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 	 * waiter. See the comment on waitqueue_active().
 	 */
 	smp_mb__after_atomic();
-	sbitmap_queue_wake_up(sbq);
+	sbitmap_queue_wake_up(sbq, 1);
 	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
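Note the resulting call-site convention: the batched path (sbitmap_queue_clear_batch()) passes nr_tags, while the single-bit paths (sbitmap_queue_clear() and the queue switch in blk_mq_get_tag()) pass 1, so the wake-up credit consumed always matches the number of bits actually cleared.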