Commit 57056740 authored by Josef Bacik, committed by David Sterba

btrfs: do async reclaim for data reservations

Now that we have the data ticketing stuff in place, move normal data
reservations to use an async reclaim helper to satisfy tickets.  Before
we could have multiple tasks race in and both allocate chunks, resulting
in more data chunks than we would necessarily need.  Serializing these
allocations and making a single thread responsible for flushing will
only allocate chunks as needed, as well as cut down on transaction
commits and other flush related activities.

Priority reservations will still work as they have before, simply
trying to allocate a chunk until they can make their reservation.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent cb3e3930
...@@ -494,7 +494,7 @@ enum btrfs_orphan_cleanup_state { ...@@ -494,7 +494,7 @@ enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_DONE = 2, ORPHAN_CLEANUP_DONE = 2,
}; };
void btrfs_init_async_reclaim_work(struct work_struct *work); void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info);
/* fs_info */ /* fs_info */
struct reloc_control; struct reloc_control;
...@@ -912,6 +912,7 @@ struct btrfs_fs_info { ...@@ -912,6 +912,7 @@ struct btrfs_fs_info {
/* Used to reclaim the metadata space in the background. */ /* Used to reclaim the metadata space in the background. */
struct work_struct async_reclaim_work; struct work_struct async_reclaim_work;
struct work_struct async_data_reclaim_work;
spinlock_t unused_bgs_lock; spinlock_t unused_bgs_lock;
struct list_head unused_bgs; struct list_head unused_bgs;
......
...@@ -2753,7 +2753,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) ...@@ -2753,7 +2753,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
fs_info->check_integrity_print_mask = 0; fs_info->check_integrity_print_mask = 0;
#endif #endif
btrfs_init_balance(fs_info); btrfs_init_balance(fs_info);
btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work); btrfs_init_async_reclaim_work(fs_info);
spin_lock_init(&fs_info->block_group_cache_lock); spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT; fs_info->block_group_cache_tree = RB_ROOT;
...@@ -4056,6 +4056,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) ...@@ -4056,6 +4056,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
btrfs_cleanup_defrag_inodes(fs_info); btrfs_cleanup_defrag_inodes(fs_info);
cancel_work_sync(&fs_info->async_reclaim_work); cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
/* Cancel or finish ongoing discard work */ /* Cancel or finish ongoing discard work */
btrfs_discard_cleanup(fs_info); btrfs_discard_cleanup(fs_info);
......
...@@ -998,9 +998,79 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) ...@@ -998,9 +998,79 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
} while (flush_state <= COMMIT_TRANS); } while (flush_state <= COMMIT_TRANS);
} }
void btrfs_init_async_reclaim_work(struct work_struct *work) static const enum btrfs_flush_state data_flush_states[] = {
FLUSH_DELALLOC_WAIT,
RUN_DELAYED_IPUTS,
FLUSH_DELAYED_REFS,
COMMIT_TRANS,
};
static void btrfs_async_reclaim_data_space(struct work_struct *work)
{ {
INIT_WORK(work, btrfs_async_reclaim_metadata_space); struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
u64 last_tickets_id;
int flush_state = 0;
fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
space_info = fs_info->data_sinfo;
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
space_info->flush = 0;
spin_unlock(&space_info->lock);
return;
}
last_tickets_id = space_info->tickets_id;
spin_unlock(&space_info->lock);
while (!space_info->full) {
flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
space_info->flush = 0;
spin_unlock(&space_info->lock);
return;
}
last_tickets_id = space_info->tickets_id;
spin_unlock(&space_info->lock);
}
while (flush_state < ARRAY_SIZE(data_flush_states)) {
flush_space(fs_info, space_info, U64_MAX,
data_flush_states[flush_state]);
spin_lock(&space_info->lock);
if (list_empty(&space_info->tickets)) {
space_info->flush = 0;
spin_unlock(&space_info->lock);
return;
}
if (last_tickets_id == space_info->tickets_id) {
flush_state++;
} else {
last_tickets_id = space_info->tickets_id;
flush_state = 0;
}
if (flush_state >= ARRAY_SIZE(data_flush_states)) {
if (space_info->full) {
if (maybe_fail_all_tickets(fs_info, space_info))
flush_state = 0;
else
space_info->flush = 0;
} else {
flush_state = 0;
}
}
spin_unlock(&space_info->lock);
}
}
void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
} }
static const enum btrfs_flush_state priority_flush_states[] = { static const enum btrfs_flush_state priority_flush_states[] = {
...@@ -1020,13 +1090,6 @@ static const enum btrfs_flush_state evict_flush_states[] = { ...@@ -1020,13 +1090,6 @@ static const enum btrfs_flush_state evict_flush_states[] = {
COMMIT_TRANS, COMMIT_TRANS,
}; };
static const enum btrfs_flush_state data_flush_states[] = {
FLUSH_DELALLOC_WAIT,
RUN_DELAYED_IPUTS,
FLUSH_DELAYED_REFS,
COMMIT_TRANS,
};
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, struct btrfs_space_info *space_info,
struct reserve_ticket *ticket, struct reserve_ticket *ticket,
...@@ -1059,12 +1122,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, ...@@ -1059,12 +1122,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, struct btrfs_space_info *space_info,
struct reserve_ticket *ticket, struct reserve_ticket *ticket)
const enum btrfs_flush_state *states,
int states_nr)
{ {
int flush_state = 0;
while (!space_info->full) { while (!space_info->full) {
flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE); flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
spin_lock(&space_info->lock); spin_lock(&space_info->lock);
...@@ -1074,17 +1133,6 @@ static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, ...@@ -1074,17 +1133,6 @@ static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
} }
spin_unlock(&space_info->lock); spin_unlock(&space_info->lock);
} }
while (flush_state < states_nr) {
flush_space(fs_info, space_info, U64_MAX, states[flush_state]);
spin_lock(&space_info->lock);
if (ticket->bytes == 0) {
spin_unlock(&space_info->lock);
return;
}
spin_unlock(&space_info->lock);
flush_state++;
}
} }
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info, static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
...@@ -1139,6 +1187,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, ...@@ -1139,6 +1187,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
int ret; int ret;
switch (flush) { switch (flush) {
case BTRFS_RESERVE_FLUSH_DATA:
case BTRFS_RESERVE_FLUSH_ALL: case BTRFS_RESERVE_FLUSH_ALL:
case BTRFS_RESERVE_FLUSH_ALL_STEAL: case BTRFS_RESERVE_FLUSH_ALL_STEAL:
wait_reserve_ticket(fs_info, space_info, ticket); wait_reserve_ticket(fs_info, space_info, ticket);
...@@ -1153,12 +1202,8 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, ...@@ -1153,12 +1202,8 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
evict_flush_states, evict_flush_states,
ARRAY_SIZE(evict_flush_states)); ARRAY_SIZE(evict_flush_states));
break; break;
case BTRFS_RESERVE_FLUSH_DATA:
priority_reclaim_data_space(fs_info, space_info, ticket,
data_flush_states, ARRAY_SIZE(data_flush_states));
break;
case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
priority_reclaim_data_space(fs_info, space_info, ticket, NULL, 0); priority_reclaim_data_space(fs_info, space_info, ticket);
break; break;
default: default:
ASSERT(0); ASSERT(0);
...@@ -1223,6 +1268,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, ...@@ -1223,6 +1268,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 orig_bytes, struct btrfs_space_info *space_info, u64 orig_bytes,
enum btrfs_reserve_flush_enum flush) enum btrfs_reserve_flush_enum flush)
{ {
struct work_struct *async_work;
struct reserve_ticket ticket; struct reserve_ticket ticket;
u64 used; u64 used;
int ret = 0; int ret = 0;
...@@ -1231,6 +1277,11 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, ...@@ -1231,6 +1277,11 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
ASSERT(orig_bytes); ASSERT(orig_bytes);
ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
if (flush == BTRFS_RESERVE_FLUSH_DATA)
async_work = &fs_info->async_data_reclaim_work;
else
async_work = &fs_info->async_reclaim_work;
spin_lock(&space_info->lock); spin_lock(&space_info->lock);
ret = -ENOSPC; ret = -ENOSPC;
used = btrfs_space_info_used(space_info, true); used = btrfs_space_info_used(space_info, true);
...@@ -1272,7 +1323,8 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, ...@@ -1272,7 +1323,8 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
init_waitqueue_head(&ticket.wait); init_waitqueue_head(&ticket.wait);
ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
if (flush == BTRFS_RESERVE_FLUSH_ALL || if (flush == BTRFS_RESERVE_FLUSH_ALL ||
flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
flush == BTRFS_RESERVE_FLUSH_DATA) {
list_add_tail(&ticket.list, &space_info->tickets); list_add_tail(&ticket.list, &space_info->tickets);
if (!space_info->flush) { if (!space_info->flush) {
space_info->flush = 1; space_info->flush = 1;
...@@ -1280,8 +1332,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info, ...@@ -1280,8 +1332,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
space_info->flags, space_info->flags,
orig_bytes, flush, orig_bytes, flush,
"enospc"); "enospc");
queue_work(system_unbound_wq, queue_work(system_unbound_wq, async_work);
&fs_info->async_reclaim_work);
} }
} else { } else {
list_add_tail(&ticket.list, list_add_tail(&ticket.list,
......
...@@ -1871,6 +1871,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) ...@@ -1871,6 +1871,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
* the filesystem is busy. * the filesystem is busy.
*/ */
cancel_work_sync(&fs_info->async_reclaim_work); cancel_work_sync(&fs_info->async_reclaim_work);
cancel_work_sync(&fs_info->async_data_reclaim_work);
btrfs_discard_cleanup(fs_info); btrfs_discard_cleanup(fs_info);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment