Commit ea9ed87c authored by Pavel Begunkov, committed by David Sterba

btrfs: fix async discard stall

It might happen that bg->discard_eligible_time is changed without rescheduling,
so btrfs_discard_workfn() wakes up earlier than that new time,
peek_discard_list() returns NULL, and all work halts and goes to sleep without
further rescheduling even though there are block groups left to discard.

This happens fairly often, but it is not very visible from userspace because
after some time the work will usually be kicked off anyway by someone else
calling btrfs_discard_reschedule_work().

Fix it by continuing to reschedule the work as long as the block group discard
lists are not empty.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 675a4fc8
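
For illustration only (not part of the kernel patch below): a minimal, self-contained userspace C sketch of the decision the fix moves into btrfs_discard_workfn(). The worker samples the clock once and, if the peeked block group is not yet eligible, reschedules itself instead of returning with no pending timer. The struct field mirrors the kernel one, but the helpers here (should_discard_now, reschedule_discard_work) are hypothetical stand-ins, not the kernel API.

/* Illustrative userspace sketch only; simplified stand-ins for kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct block_group {
	uint64_t discard_eligible_time;	/* absolute time in ns */
};

/* Stand-in for btrfs_discard_schedule_work(): just report the decision. */
static void reschedule_discard_work(void)
{
	puts("woke up too early -> reschedule the work, don't stall");
}

/* Mirrors the check the patch adds near the top of btrfs_discard_workfn(). */
static bool should_discard_now(const struct block_group *bg, uint64_t now)
{
	if (now < bg->discard_eligible_time) {
		reschedule_discard_work();
		return false;
	}
	return true;
}

int main(void)
{
	struct block_group bg = { .discard_eligible_time = 2000 };

	/* eligible_time was pushed out after the work was queued */
	should_discard_now(&bg, 1000);		/* prints the reschedule message */

	if (should_discard_now(&bg, 2000))	/* now >= eligible_time */
		puts("eligible -> perform discard");

	return 0;
}

Note also that the comparison in peek_discard_list() changes from > to >=, so a wakeup at exactly the eligible time now proceeds with discarding rather than being treated as too early.
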
@@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
 static struct btrfs_block_group *peek_discard_list(
 					struct btrfs_discard_ctl *discard_ctl,
 					enum btrfs_discard_state *discard_state,
-					int *discard_index)
+					int *discard_index, u64 now)
 {
 	struct btrfs_block_group *block_group;
-	const u64 now = ktime_get_ns();

 	spin_lock(&discard_ctl->lock);
 again:
 	block_group = find_next_block_group(discard_ctl, now);

-	if (block_group && now > block_group->discard_eligible_time) {
+	if (block_group && now >= block_group->discard_eligible_time) {
 		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
 		    block_group->used != 0) {
 			if (btrfs_is_block_group_data_only(block_group))
@@ -222,12 +221,11 @@ static struct btrfs_block_group *peek_discard_list(
 			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
 		}
 		discard_ctl->block_group = block_group;
+	}
+	if (block_group) {
 		*discard_state = block_group->discard_state;
 		*discard_index = block_group->discard_index;
-	} else {
-		block_group = NULL;
 	}
-
 	spin_unlock(&discard_ctl->lock);

 	return block_group;
@@ -438,13 +436,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
 	int discard_index = 0;
 	u64 trimmed = 0;
 	u64 minlen = 0;
+	u64 now = ktime_get_ns();

 	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

 	block_group = peek_discard_list(discard_ctl, &discard_state,
-					&discard_index);
+					&discard_index, now);
 	if (!block_group || !btrfs_run_discard_work(discard_ctl))
 		return;
+	if (now < block_group->discard_eligible_time) {
+		btrfs_discard_schedule_work(discard_ctl, false);
+		return;
+	}

 	/* Perform discarding */
 	minlen = discard_minlen[discard_index];