Commit 6b5b817f authored by Chris Mason

Merge branch 'bug-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs-work

Conflicts:
	fs/btrfs/extent-tree.c
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parents 8216ef86 e9bb7f10
fs/btrfs/ctree.h
@@ -698,7 +698,8 @@ struct btrfs_block_group_item {
struct btrfs_space_info {
u64 flags;
- u64 total_bytes; /* total bytes in the space */
+ u64 total_bytes; /* total bytes in the space,
+ this doesn't take mirrors into account */
u64 bytes_used; /* total bytes used,
this does't take mirrors into account */
u64 bytes_pinned; /* total bytes pinned, will be freed when the
@@ -710,6 +711,8 @@ struct btrfs_space_info {
u64 bytes_may_use; /* number of bytes that may be used for
delalloc/allocations */
u64 disk_used; /* total bytes used on disk */
+ u64 disk_total; /* total bytes on disk, takes mirrors into
+ account */
int full; /* indicates that we cannot allocate any more
chunks for this space */
@@ -2146,7 +2149,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- int num_items, int *retries);
+ int num_items);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
@@ -2167,7 +2170,7 @@ void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
- u64 num_bytes, int *retries);
+ u64 num_bytes);
int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
@@ -2441,7 +2444,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u32 min_type);
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
- int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput);
+ int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
+ int sync);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
int btrfs_writepages(struct address_space *mapping,
......
This diff is collapsed and not shown here.
fs/btrfs/inode.c
@@ -6681,7 +6681,8 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
return 0;
}
- int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput)
+ int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
+ int sync)
{
struct btrfs_inode *binode;
struct inode *inode = NULL;
@@ -6703,7 +6704,26 @@ int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput)
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
- write_inode_now(inode, 0);
+ if (sync) {
+ filemap_write_and_wait(inode->i_mapping);
+ /*
+ * We have to do this because compression doesn't
+ * actually set PG_writeback until it submits the pages
+ * for IO, which happens in an async thread, so we could
+ * race and not actually wait for any writeback pages
+ * because they've not been submitted yet. Technically
+ * this could still be the case for the ordered stuff
+ * since the async thread may not have started to do its
+ * work yet. If this becomes the case then we need to
+ * figure out a way to make sure that in writepage we
+ * wait for any async pages to be submitted before
+ * returning so that fdatawait does what its supposed to
+ * do.
+ */
+ btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ } else {
+ filemap_flush(inode->i_mapping);
+ }
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
......
fs/btrfs/ioctl.c
@@ -1879,6 +1879,22 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
return 0;
}
+ static void get_block_group_info(struct list_head *groups_list,
+ struct btrfs_ioctl_space_info *space)
+ {
+ struct btrfs_block_group_cache *block_group;
+ space->total_bytes = 0;
+ space->used_bytes = 0;
+ space->flags = 0;
+ list_for_each_entry(block_group, groups_list, list) {
+ space->flags = block_group->flags;
+ space->total_bytes += block_group->key.offset;
+ space->used_bytes +=
+ btrfs_block_group_used(&block_group->item);
+ }
+ }
long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
{
struct btrfs_ioctl_space_args space_args;
@@ -1887,27 +1903,56 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
struct btrfs_ioctl_space_info *dest_orig;
struct btrfs_ioctl_space_info *user_dest;
struct btrfs_space_info *info;
+ u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
+ int num_types = 4;
int alloc_size;
int ret = 0;
int slot_count = 0;
+ int i, c;
if (copy_from_user(&space_args,
(struct btrfs_ioctl_space_args __user *)arg,
sizeof(space_args)))
return -EFAULT;
/* first we count slots */
+ for (i = 0; i < num_types; i++) {
+ struct btrfs_space_info *tmp;
+ info = NULL;
rcu_read_lock();
- list_for_each_entry_rcu(info, &root->fs_info->space_info, list)
- slot_count++;
+ list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+ list) {
+ if (tmp->flags == types[i]) {
+ info = tmp;
+ break;
+ }
+ }
rcu_read_unlock();
+ if (!info)
+ continue;
+ down_read(&info->groups_sem);
+ for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
+ if (!list_empty(&info->block_groups[c]))
+ slot_count++;
+ }
+ up_read(&info->groups_sem);
+ }
/* space_slots == 0 means they are asking for a count */
if (space_args.space_slots == 0) {
space_args.total_spaces = slot_count;
goto out;
}
slot_count = min_t(int, space_args.space_slots, slot_count);
alloc_size = sizeof(*dest) * slot_count;
/* we generally have at most 6 or so space infos, one for each raid
* level. So, a whole page should be more than enough for everyone
*/
@@ -1921,27 +1966,34 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
dest_orig = dest;
/* now we have a buffer to copy into */
- rcu_read_lock();
- list_for_each_entry_rcu(info, &root->fs_info->space_info, list) {
/* make sure we don't copy more than we allocated
* in our buffer
*/
if (slot_count == 0)
break;
slot_count--;
+ for (i = 0; i < num_types; i++) {
+ struct btrfs_space_info *tmp;
/* make sure userland has enough room in their buffer */
if (space_args.total_spaces >= space_args.space_slots)
+ info = NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+ list) {
+ if (tmp->flags == types[i]) {
+ info = tmp;
+ break;
+ }
+ }
+ rcu_read_unlock();
- space.flags = info->flags;
- space.total_bytes = info->total_bytes;
- space.used_bytes = info->bytes_used;
+ if (!info)
+ continue;
+ down_read(&info->groups_sem);
+ for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
+ if (!list_empty(&info->block_groups[c])) {
+ get_block_group_info(&info->block_groups[c],
+ &space);
memcpy(dest, &space, sizeof(space));
dest++;
space_args.total_spaces++;
}
- rcu_read_unlock();
}
+ up_read(&info->groups_sem);
}
user_dest = (struct btrfs_ioctl_space_info *)
(arg + sizeof(struct btrfs_ioctl_space_args));
......
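For context on the ioctl.c hunks above: the reworked BTRFS_IOC_SPACE_INFO handler keeps the existing two-call convention (space_slots == 0 asks only for a count; a second call with that many slots gets them filled in), but it now reports one btrfs_ioctl_space_info entry per block-group type and raid profile that actually has block groups, filled in by get_block_group_info(). Below is a minimal userspace sketch of that calling sequence; it is not part of this commit and assumes the btrfs ioctl definitions (struct btrfs_ioctl_space_args, struct btrfs_ioctl_space_info, BTRFS_IOC_SPACE_INFO) are available, e.g. from <linux/btrfs.h> today or btrfs-progs' ioctl.h on older systems.

```c
/* Hypothetical example, not from this commit: query btrfs space info. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>	/* or btrfs-progs' ioctl.h */

int main(int argc, char **argv)
{
	/* first call: space_slots == 0 asks the kernel only for a count */
	struct btrfs_ioctl_space_args count = { .space_slots = 0 };
	struct btrfs_ioctl_space_args *args;
	int fd = open(argc > 1 ? argv[1] : "/", O_RDONLY);
	__u64 i;

	if (fd < 0 || ioctl(fd, BTRFS_IOC_SPACE_INFO, &count) < 0) {
		perror("BTRFS_IOC_SPACE_INFO (count)");
		return 1;
	}

	/* second call: hand the kernel enough slots for every reported space */
	args = calloc(1, sizeof(*args) +
			 count.total_spaces * sizeof(struct btrfs_ioctl_space_info));
	if (!args)
		return 1;
	args->space_slots = count.total_spaces;

	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, args) < 0) {
		perror("BTRFS_IOC_SPACE_INFO (fill)");
		return 1;
	}

	for (i = 0; i < args->total_spaces; i++)
		printf("flags 0x%llx total %llu used %llu\n",
		       (unsigned long long)args->spaces[i].flags,
		       (unsigned long long)args->spaces[i].total_bytes,
		       (unsigned long long)args->spaces[i].used_bytes);

	free(args);
	close(fd);
	return 0;
}
```

This is essentially the sequence btrfs-progs drives for `btrfs filesystem df`; with this patch each returned entry corresponds to one (type, raid profile) pair that has allocated block groups.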
fs/btrfs/relocation.c
@@ -179,8 +179,6 @@ struct reloc_control {
u64 search_start;
u64 extents_found;
- int block_rsv_retries;
unsigned int stage:8;
unsigned int create_reloc_tree:1;
unsigned int merge_reloc_tree:1;
@@ -2134,7 +2132,6 @@ int prepare_to_merge(struct reloc_control *rc, int err)
LIST_HEAD(reloc_roots);
u64 num_bytes = 0;
int ret;
- int retries = 0;
mutex_lock(&root->fs_info->trans_mutex);
rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
@@ -2144,7 +2141,7 @@ int prepare_to_merge(struct reloc_control *rc, int err)
if (!err) {
num_bytes = rc->merging_rsv_size;
ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv,
- num_bytes, &retries);
+ num_bytes);
if (ret)
err = ret;
}
@@ -2156,7 +2153,6 @@ int prepare_to_merge(struct reloc_control *rc, int err)
btrfs_end_transaction(trans, rc->extent_root);
btrfs_block_rsv_release(rc->extent_root,
rc->block_rsv, num_bytes);
- retries = 0;
goto again;
}
}
@@ -2406,15 +2402,13 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
num_bytes = calcu_metadata_size(rc, node, 1) * 2;
trans->block_rsv = rc->block_rsv;
- ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes,
- &rc->block_rsv_retries);
+ ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes);
if (ret) {
if (ret == -EAGAIN)
rc->commit_transaction = 1;
return ret;
}
- rc->block_rsv_retries = 0;
return 0;
}
@@ -3615,8 +3609,7 @@ int prepare_to_relocate(struct reloc_control *rc)
* is no reservation in transaction handle.
*/
ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv,
- rc->extent_root->nodesize * 256,
- &rc->block_rsv_retries);
+ rc->extent_root->nodesize * 256);
if (ret)
return ret;
@@ -3628,7 +3621,6 @@ int prepare_to_relocate(struct reloc_control *rc)
rc->extents_found = 0;
rc->nodes_relocated = 0;
rc->merging_rsv_size = 0;
- rc->block_rsv_retries = 0;
rc->create_reloc_tree = 1;
set_reloc_control(rc);
......
fs/btrfs/super.c
@@ -638,7 +638,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
if (IS_ERR(root)) {
error = PTR_ERR(root);
deactivate_locked_super(s);
- goto error;
+ goto error_free_subvol_name;
}
/* if they gave us a subvolume name bind mount into that */
if (strcmp(subvol_name, ".")) {
@@ -652,14 +652,14 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
deactivate_locked_super(s);
error = PTR_ERR(new_root);
dput(root);
- goto error_close_devices;
+ goto error_free_subvol_name;
}
if (!new_root->d_inode) {
dput(root);
dput(new_root);
deactivate_locked_super(s);
error = -ENXIO;
- goto error_close_devices;
+ goto error_free_subvol_name;
}
dput(root);
root = new_root;
@@ -677,7 +677,6 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
btrfs_close_devices(fs_devices);
error_free_subvol_name:
kfree(subvol_name);
- error:
return error;
}
@@ -725,18 +724,25 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct list_head *head = &root->fs_info->space_info;
struct btrfs_space_info *found;
u64 total_used = 0;
+ u64 total_used_data = 0;
int bits = dentry->d_sb->s_blocksize_bits;
__be32 *fsid = (__be32 *)root->fs_info->fsid;
rcu_read_lock();
- list_for_each_entry_rcu(found, head, list)
+ list_for_each_entry_rcu(found, head, list) {
+ if (found->flags & (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_SYSTEM))
+ total_used_data += found->disk_total;
+ else
+ total_used_data += found->disk_used;
total_used += found->disk_used;
+ }
rcu_read_unlock();
buf->f_namelen = BTRFS_NAME_LEN;
buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
buf->f_bfree = buf->f_blocks - (total_used >> bits);
- buf->f_bavail = buf->f_bfree;
+ buf->f_bavail = buf->f_blocks - (total_used_data >> bits);
buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_type = BTRFS_SUPER_MAGIC;
......
fs/btrfs/transaction.c
@@ -180,7 +180,6 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
- int retries = 0;
int ret;
again:
h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -215,8 +214,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
}
if (num_items > 0) {
- ret = btrfs_trans_reserve_metadata(h, root, num_items,
- &retries);
+ ret = btrfs_trans_reserve_metadata(h, root, num_items);
if (ret == -EAGAIN) {
btrfs_commit_transaction(h, root);
goto again;
@@ -855,7 +853,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct extent_buffer *tmp;
struct extent_buffer *old;
int ret;
- int retries = 0;
u64 to_reserve = 0;
u64 index = 0;
u64 objectid;
@@ -877,7 +874,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (to_reserve > 0) {
ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
- to_reserve, &retries);
+ to_reserve);
if (ret) {
pending->error = ret;
goto fail;
......