Commit 4ceff079 authored by Qu Wenruo, committed by Chris Mason

btrfs: extent-tree: Add new version of btrfs_check_data_free_space and btrfs_free_reserved_data_space.

Add new functions __btrfs_check_data_free_space() and
__btrfs_free_reserved_data_space() to work with the new accurate qgroup
reserved space framework.

The new functions will replace the old btrfs_check_data_free_space() and
btrfs_free_reserved_data_space() respectively, but until all the changes
are done, let's just use the new names.
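As an illustration only (not part of this commit), a hypothetical write-path
caller of the new pair might look like the sketch below; do_the_write() and
the pos/write_bytes parameters are placeholders:

/*
 * Illustrative sketch, not from the commit: reserve data + qgroup space
 * for the byte range being written, and release it again if the write
 * itself fails.  do_the_write() stands in for the caller's real work.
 */
static int example_reserve_and_write(struct inode *inode, u64 pos, u64 write_bytes)
{
	int ret;

	ret = __btrfs_check_data_free_space(inode, pos, write_bytes);
	if (ret < 0)
		return ret;

	ret = do_the_write(inode, pos, write_bytes);	/* placeholder */
	if (ret < 0)
		__btrfs_free_reserved_data_space(inode, pos, write_bytes);
	return ret;
}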

Also, export the internal-use function btrfs_alloc_data_chunk_ondemand():
now that qgroup reservation needs precise byte counts, some operations
can't know the exact number in advance (fallocate, for example).
But the data space info check and data chunk allocation don't need to be
that accurate, and can be done at the beginning.

So export it for such later operations.
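For example (a sketch under the assumptions above, not code from this commit),
a fallocate-style path could make sure data chunks exist up front and defer
the precise qgroup reservation until a concrete range is known; the function
name and range variables below are hypothetical:

/*
 * Illustrative sketch only: ensure enough data chunks exist for the worst
 * case first, then reserve precise qgroup space per range once the exact
 * ranges are known.  All names here are hypothetical.
 */
static int example_prealloc(struct inode *inode, u64 alloc_start, u64 alloc_end)
{
	u64 range_start = alloc_start;	/* filled in as ranges are found */
	u64 range_len = alloc_end - alloc_start;
	int ret;

	/* coarse, early check: only needs the total byte count */
	ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
	if (ret < 0)
		return ret;

	/* later, precise per-range qgroup reservation */
	return btrfs_qgroup_reserve_data(inode, range_start, range_len);
}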
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 7174109c
@@ -3453,7 +3453,10 @@ enum btrfs_reserve_flush_enum {
 };
 
 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes);
+int __btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
+int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
+void __btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 				  struct btrfs_root *root);
 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
@@ -3904,11 +3904,7 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 	return ret;
 }
 
-/*
- * This will check the space that the inode allocates from to make sure we have
- * enough space for bytes.
- */
-int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
+int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 {
 	struct btrfs_space_info *data_sinfo;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -4029,18 +4025,54 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
 					      data_sinfo->flags, bytes, 1);
 		return -ENOSPC;
 	}
-	ret = btrfs_qgroup_reserve(root, write_bytes);
-	if (ret)
-		goto out;
 	data_sinfo->bytes_may_use += bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
 				      data_sinfo->flags, bytes, 1);
-out:
 	spin_unlock(&data_sinfo->lock);
 
 	return ret;
 }
+
+/*
+ * This will check the space that the inode allocates from to make sure we have
+ * enough space for bytes.
+ */
+int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	ret = btrfs_alloc_data_chunk_ondemand(inode, bytes);
+	if (ret < 0)
+		return ret;
+	ret = btrfs_qgroup_reserve(root, write_bytes);
+	return ret;
+}
+
+/*
+ * New check_data_free_space() with the ability for precise data reservation.
+ * Will replace the old btrfs_check_data_free_space(), but for the patch split,
+ * add a new function first and then replace it.
+ */
+int __btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	/* align the range */
+	len = round_up(start + len, root->sectorsize) -
+	      round_down(start, root->sectorsize);
+	start = round_down(start, root->sectorsize);
+
+	ret = btrfs_alloc_data_chunk_ondemand(inode, len);
+	if (ret < 0)
+		return ret;
+
+	/* Use the new btrfs_qgroup_reserve_data() to reserve precise data space */
+	ret = btrfs_qgroup_reserve_data(inode, start, len);
+	return ret;
+}
+
 /*
  * Called if we need to clear a data reservation for this inode.
  */
@@ -4061,6 +4093,41 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
 	spin_unlock(&data_sinfo->lock);
 }
 
+/*
+ * Called if we need to clear a data reservation for this inode,
+ * normally in an error case.
+ *
+ * This one will handle the per-inode data rsv map for the accurate reserved
+ * space framework.
+ */
+void __btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
+{
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_space_info *data_sinfo;
+
+	/* Make sure the range is aligned to sectorsize */
+	len = round_up(start + len, root->sectorsize) -
+	      round_down(start, root->sectorsize);
+	start = round_down(start, root->sectorsize);
+
+	/*
+	 * Free any reserved qgroup data space first.
+	 * As it may allocate memory, we can't do it with the data sinfo
+	 * spinlock held.
+	 */
+	btrfs_qgroup_free_data(inode, start, len);
+
+	data_sinfo = root->fs_info->data_sinfo;
+	spin_lock(&data_sinfo->lock);
+	if (WARN_ON(data_sinfo->bytes_may_use < len))
+		data_sinfo->bytes_may_use = 0;
+	else
+		data_sinfo->bytes_may_use -= len;
+	trace_btrfs_space_reservation(root->fs_info, "space_info",
+				      data_sinfo->flags, len, 0);
+	spin_unlock(&data_sinfo->lock);
+}
+
 static void force_metadata_allocation(struct btrfs_fs_info *info)
 {
 	struct list_head *head = &info->space_info;
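For reference, both new helpers round the caller's range out to sectorsize
boundaries before reserving or freeing space. A small worked example with
illustrative numbers (4KiB sectorsize, values not taken from the commit):

	/* start = 6000, len = 3000, sectorsize = 4096 (illustrative values) */
	len   = round_up(6000 + 3000, 4096) - round_down(6000, 4096);
		/* = 12288 - 4096 = 8192, i.e. two full sectors */
	start = round_down(6000, 4096);		/* = 4096 */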