Commit 0bc09ca1 authored by Naohiro Aota, committed by David Sterba

btrfs: zoned: serialize metadata IO

We cannot use zone append for writing metadata, because the B-tree nodes
reference each other using logical addresses. Without knowing the address
in advance, we cannot construct the tree in the first place. So we need
to serialize write IOs for metadata.
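As an illustration (a standalone userspace sketch, not kernel code; the
struct and the addresses are invented), the COW allocator hands out a
logical address up front, so a parent's payload can embed its child's
bytenr before either block is written. Zone append would only report the
written location at IO completion, which is too late:

#include <stdio.h>

/* Model of a B-tree node: the payload written to disk contains the
 * logical address (bytenr) of the referenced child. */
struct node {
	unsigned long long bytenr;       /* logical address, fixed at allocation */
	unsigned long long child_bytenr; /* reference embedded in the block */
};

int main(void)
{
	/* Allocation hands out logical addresses up front ... */
	struct node child  = { .bytenr = 0x100000 };
	/* ... so the parent's payload can embed the child's address
	 * before a single byte hits the disk.  With zone append, the
	 * device would pick the location only at IO completion, after
	 * the parent's payload is already fixed. */
	struct node parent = { .bytenr = 0x104000,
			       .child_bytenr = child.bytenr };

	printf("parent %#llx -> child %#llx\n",
	       parent.bytenr, parent.child_bytenr);
	return 0;
}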

We cannot simply add a mutex around allocation and submission, because
metadata blocks are allocated at an earlier stage, while the B-trees are
being built up.

Add a zoned_meta_io_lock and hold it during metadata IO submission in
btree_write_cache_pages() to serialize IOs.
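The pattern, modeled as a standalone userspace sketch (a pthread mutex
stands in for the kernel mutex and every name here is illustrative; build
with cc -pthread): taking the lock is a no-op on regular filesystems, and
on zoned ones it is held across the whole writeback pass rather than
around each allocation:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fs_info {
	bool zoned;
	pthread_mutex_t meta_io_lock;
};

/* No-op on regular disks, so the non-zoned path is unchanged. */
static void meta_io_lock(struct fs_info *fs)
{
	if (fs->zoned)
		pthread_mutex_lock(&fs->meta_io_lock);
}

static void meta_io_unlock(struct fs_info *fs)
{
	if (fs->zoned)
		pthread_mutex_unlock(&fs->meta_io_lock);
}

static void btree_writeback(struct fs_info *fs)
{
	meta_io_lock(fs);
	/* ... submit all dirty metadata in logical-address order ... */
	printf("writeback ran %s the lock\n", fs->zoned ? "under" : "without");
	meta_io_unlock(fs);
}

int main(void)
{
	struct fs_info fs = { .zoned = true,
			      .meta_io_lock = PTHREAD_MUTEX_INITIALIZER };
	btree_writeback(&fs);
	return 0;
}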

Furthermore, this adds a per-block group metadata IO submission pointer,
"meta_write_pointer", to ensure sequential writing. The sequence can break
when we attempt to write back blocks from an unfinished transaction,
leaving a hole at the write pointer. If the write-out fails because of
such a hole and it is for data integrity (WB_SYNC_ALL), return -EAGAIN.
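The check itself reduces to a compare-and-advance on that pointer. A
minimal standalone model (field and function names shortened, addresses
invented):

#include <stdbool.h>
#include <stdio.h>

struct block_group {
	unsigned long long meta_write_pointer; /* next expected bytenr */
};

/* A buffer may be submitted only if it starts exactly at the write
 * pointer; on success the pointer advances past it.  A mismatch means
 * submitting now would leave a hole in the sequential zone. */
static bool check_meta_write_pointer(struct block_group *bg,
				     unsigned long long start,
				     unsigned long long len)
{
	if (bg->meta_write_pointer != start)
		return false;
	bg->meta_write_pointer = start + len;
	return true;
}

int main(void)
{
	struct block_group bg = { .meta_write_pointer = 0x100000 };

	printf("in order:     %d\n", check_meta_write_pointer(&bg, 0x100000, 0x4000));
	printf("out of order: %d\n", check_meta_write_pointer(&bg, 0x10c000, 0x4000));
	return 0;
}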

A caller like the fsync() code should handle this properly, e.g. by
falling back to a full transaction commit.
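A caller-side sketch of that contract (write_log_tree() and
commit_transaction() below are hypothetical stand-ins; only the -EAGAIN
handling mirrors the patch):

#include <errno.h>
#include <stdio.h>

/* Hypothetical tree-log write-out that hit a write pointer hole. */
static int write_log_tree(void)
{
	return -EAGAIN;
}

/* A full commit writes out all dirty metadata in order, which fills
 * the hole that stopped the log write-out. */
static int commit_transaction(void)
{
	return 0;
}

static int fsync_path(void)
{
	int ret = write_log_tree();

	if (ret == -EAGAIN)
		ret = commit_transaction();
	return ret;
}

int main(void)
{
	printf("fsync path returned %d\n", fsync_path());
	return 0;
}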
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 42c01100
fs/btrfs/block-group.h
@@ -193,6 +193,7 @@ struct btrfs_block_group {
 	 */
 	u64 alloc_offset;
 	u64 zone_unusable;
+	u64 meta_write_pointer;
 };
 
 static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
fs/btrfs/ctree.h
@@ -975,6 +975,7 @@ struct btrfs_fs_info {
 	/* Max size to emit ZONE_APPEND write command */
 	u64 max_zone_append_size;
+	struct mutex zoned_meta_io_lock;
 
 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
 	spinlock_t ref_verify_lock;
fs/btrfs/disk-io.c
@@ -2769,6 +2769,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	mutex_init(&fs_info->delete_unused_bgs_mutex);
 	mutex_init(&fs_info->reloc_mutex);
 	mutex_init(&fs_info->delalloc_root_mutex);
+	mutex_init(&fs_info->zoned_meta_io_lock);
 	seqlock_init(&fs_info->profiles_lock);
 
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
fs/btrfs/extent_io.c
@@ -26,6 +26,7 @@
 #include "disk-io.h"
 #include "subpage.h"
 #include "zoned.h"
+#include "block-group.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -4161,6 +4162,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 			  struct extent_buffer **eb_context)
 {
 	struct address_space *mapping = page->mapping;
+	struct btrfs_block_group *cache = NULL;
 	struct extent_buffer *eb;
 	int ret;
@@ -4193,13 +4195,31 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 	if (!ret)
 		return 0;
 
+	if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+		/*
+		 * If for_sync, this hole will be filled with
+		 * transaction commit.
+		 */
+		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+			ret = -EAGAIN;
+		else
+			ret = 0;
+		free_extent_buffer(eb);
+		return ret;
+	}
+
 	*eb_context = eb;
 
 	ret = lock_extent_buffer_for_io(eb, epd);
 	if (ret <= 0) {
+		btrfs_revert_meta_write_pointer(cache, eb);
+		if (cache)
+			btrfs_put_block_group(cache);
 		free_extent_buffer(eb);
 		return ret;
 	}
+	if (cache)
+		btrfs_put_block_group(cache);
 	ret = write_one_eb(eb, wbc, epd);
 	free_extent_buffer(eb);
 	if (ret < 0)
@@ -4245,6 +4265,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
+	btrfs_zoned_meta_io_lock(fs_info);
 retry:
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		tag_pages_for_writeback(mapping, index, end);
@@ -4285,7 +4306,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 	}
 	if (ret < 0) {
 		end_write_bio(&epd, ret);
-		return ret;
+		goto out;
 	}
 	/*
 	 * If something went wrong, don't allow any metadata write bio to be
@@ -4320,6 +4341,8 @@ int btree_write_cache_pages(struct address_space *mapping,
 		ret = -EROFS;
 		end_write_bio(&epd, ret);
 	}
+out:
+	btrfs_zoned_meta_io_unlock(fs_info);
 	return ret;
 }
fs/btrfs/zoned.c
@@ -1159,6 +1159,9 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		ret = -EIO;
 	}
 
+	if (!ret)
+		cache->meta_write_pointer = cache->alloc_offset + cache->start;
+
 	kfree(alloc_offsets);
 	free_extent_map(em);
@@ -1317,3 +1320,50 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
 	kfree(logical);
 	bdput(bdev);
 }
+
+bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+				    struct extent_buffer *eb,
+				    struct btrfs_block_group **cache_ret)
+{
+	struct btrfs_block_group *cache;
+	bool ret = true;
+
+	if (!btrfs_is_zoned(fs_info))
+		return true;
+
+	cache = *cache_ret;
+
+	if (cache && (eb->start < cache->start ||
+		      cache->start + cache->length <= eb->start)) {
+		btrfs_put_block_group(cache);
+		cache = NULL;
+		*cache_ret = NULL;
+	}
+
+	if (!cache)
+		cache = btrfs_lookup_block_group(fs_info, eb->start);
+
+	if (cache) {
+		if (cache->meta_write_pointer != eb->start) {
+			btrfs_put_block_group(cache);
+			cache = NULL;
+			ret = false;
+		} else {
+			cache->meta_write_pointer = eb->start + eb->len;
+		}
+
+		*cache_ret = cache;
+	}
+
+	return ret;
+}
+
+void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+				     struct extent_buffer *eb)
+{
+	if (!btrfs_is_zoned(eb->fs_info) || !cache)
+		return;
+
+	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
+	cache->meta_write_pointer = eb->start;
+}
fs/btrfs/zoned.h
@@ -50,6 +50,11 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, struct extent_map *em);
 void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
 				 struct bio *bio);
 void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
+bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+				    struct extent_buffer *eb,
+				    struct btrfs_block_group **cache_ret);
+void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
+				     struct extent_buffer *eb);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -151,6 +156,19 @@ static inline void btrfs_record_physical_zoned(struct inode *inode,
 static inline void btrfs_rewrite_logical_zoned(
 				struct btrfs_ordered_extent *ordered) { }
 
+static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+						  struct extent_buffer *eb,
+						  struct btrfs_block_group **cache_ret)
+{
+	return true;
+}
+
+static inline void btrfs_revert_meta_write_pointer(
+						struct btrfs_block_group *cache,
+						struct extent_buffer *eb)
+{
+}
+
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
@@ -242,4 +260,18 @@ static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
 	return true;
 }
 
+static inline void btrfs_zoned_meta_io_lock(struct btrfs_fs_info *fs_info)
+{
+	if (!btrfs_is_zoned(fs_info))
+		return;
+	mutex_lock(&fs_info->zoned_meta_io_lock);
+}
+
+static inline void btrfs_zoned_meta_io_unlock(struct btrfs_fs_info *fs_info)
+{
+	if (!btrfs_is_zoned(fs_info))
+		return;
+	mutex_unlock(&fs_info->zoned_meta_io_lock);
+}
+
 #endif