Commit eab49bec authored by Chris Mason

Merge branch 'bug-fixes' of git://repo.or.cz/linux-btrfs-devel into btrfs-38

parents acce952b 4d728ec7
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 	char *value = NULL;
 	struct posix_acl *acl;
 
+	if (!IS_POSIXACL(inode))
+		return NULL;
+
 	acl = get_cached_acl(inode, type);
 	if (acl != ACL_NOT_CACHED)
 		return acl;
@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
 	struct posix_acl *acl;
 	int ret = 0;
 
+	if (!IS_POSIXACL(dentry->d_inode))
+		return -EOPNOTSUPP;
+
 	acl = btrfs_get_acl(dentry->d_inode, type);
 	if (IS_ERR(acl))
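These two checks make the no-ACL case explicit: btrfs_get_acl() reports that there is no ACL at all, and the xattr-based getter fails with -EOPNOTSUPP instead of returning whatever happens to be cached. From userspace the difference shows up through getxattr(); the sketch below is illustrative only (the mount point and file path are made up) and is not part of the commit.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[256];
	/* placeholder path on a filesystem mounted with ACL support disabled */
	ssize_t n = getxattr("/mnt/btrfs/file", "system.posix_acl_access",
			     buf, sizeof(buf));

	if (n < 0)
		printf("getxattr failed: %s\n", strerror(errno)); /* expect EOPNOTSUPP */
	else
		printf("got %zd bytes of ACL data\n", n);
	return 0;
}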
@@ -2565,6 +2565,8 @@ int close_ctree(struct btrfs_root *root)
 	kfree(fs_info->chunk_root);
 	kfree(fs_info->dev_root);
 	kfree(fs_info->csum_root);
+	kfree(fs_info);
+
 	return 0;
 }
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
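A quick way to see what the new branch fixes: the comment above targets at most 32k of cache memory per 1GB of space, but with pure integer division a block group smaller than 1GB gets a budget of zero. The standalone sketch below (not btrfs code; MAX_CACHE_BYTES_PER_GIG is assumed to be 32K, matching the "32k" in the comment) shows the old and new results side by side.

#include <stdio.h>
#include <stdint.h>

#define GIG			(1024ULL * 1024 * 1024)
#define MAX_CACHE_BYTES_PER_GIG	(32ULL * 1024)	/* assumed value, per the comment */

static void show(uint64_t size)
{
	/* old formula: div64_u64(size, 1G) is 0 for anything under 1GB */
	uint64_t old = MAX_CACHE_BYTES_PER_GIG * (size / GIG);
	uint64_t new;

	/* new formula: small block groups still get one full allowance */
	if (size < GIG)
		new = MAX_CACHE_BYTES_PER_GIG;
	else
		new = MAX_CACHE_BYTES_PER_GIG * (size / GIG);

	printf("%6llu MiB: old max_bytes=%llu, new max_bytes=%llu\n",
	       (unsigned long long)(size >> 20),
	       (unsigned long long)old, (unsigned long long)new);
}

int main(void)
{
	show(256ULL << 20);	/* 256 MiB block group: old budget is 0 */
	show(1ULL << 30);	/* 1 GiB: both formulas give 32768       */
	show(10ULL << 30);	/* 10 GiB: both formulas give 327680     */
	return 0;
}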
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *bitmap_info,
 			      u64 *offset, u64 *bytes)
@@ -1211,13 +1232,8 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
 
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1266,8 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1370,14 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1391,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * There was no extent directly to the left or right of this new
+	 * extent then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
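The refactor splits the old btrfs_add_free_space() in two: try_merge_free_space() swallows adjacent non-bitmap neighbours, and btrfs_add_free_space() only falls back to a bitmap when nothing merged. The update_stat flag decides whether a swallowed neighbour's bytes are subtracted from block_group->free_space. On the add path they are, and link_free_space() then counts the merged total; on the cluster-return path in the next hunk the entry goes back in via tree_insert_offset(), which never touches the counter, so its neighbours must be unlinked with __unlink_free_space() to keep the total from drifting. The toy userspace model below (no btrfs types, made-up sizes) just tracks that counter through both paths; it is a reading of the hunks above, not code from the commit.

#include <stdio.h>
#include <stdint.h>

static uint64_t free_space;	/* stands in for block_group->free_space */

/* unlink_free_space(): entry leaves the tree and the counter */
static void unlink_counted(uint64_t bytes)   { free_space -= bytes; }
/* __unlink_free_space(): entry leaves the tree only */
static void unlink_uncounted(uint64_t bytes) { (void)bytes; }

int main(void)
{
	/* add path (update_stat == true): an 8K neighbour is already counted,
	 * a new 4K extent merges with it, and link_free_space() then counts
	 * the merged 12K. */
	free_space = 8192;
	unlink_counted(8192);		/* neighbour swallowed, counter -8K */
	free_space += 8192 + 4096;	/* link_free_space() of the merged entry */
	printf("add path:     free_space = %llu\n", (unsigned long long)free_space);

	/* cluster-return path (update_stat == false): the returned 4K entry is
	 * still counted, merges with the 8K neighbour, and is re-inserted with
	 * tree_insert_offset(), which leaves the counter alone. */
	free_space = 8192 + 4096;
	unlink_uncounted(8192);		/* neighbour swallowed, counter untouched */
	printf("cluster path: free_space = %llu\n", (unsigned long long)free_space);

	return 0;	/* both paths end at 12288 */
}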
@@ -1621,6 +1649,7 @@ __btrfs_return_cluster_to_free_space(
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
@@ -1685,13 +1714,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1813,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1868,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 
-		if (entry->bytes == 0) {
+		if (entry->bytes == 0)
 			rb_erase(&entry->offset_index, &cluster->root);
-			kfree(entry);
-		}
 
 		break;
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
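With free_bitmap() and the shared counters in the picture, btrfs_alloc_from_cluster() now finishes the per-cluster bookkeeping under cluster->lock, drops it, and only then takes block_group->tree_lock to charge the allocation against free_space, drop free_extents and free an exhausted entry. A toy userspace sketch of that shape follows, with mutexes standing in for the spinlocks and all names and numbers invented for illustration; it is a reading of the hunk above, not the kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { uint64_t bytes; };

static pthread_mutex_t cluster_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tree_lock    = PTHREAD_MUTEX_INITIALIZER;
static uint64_t free_space   = 16384;
static uint64_t free_extents = 1;

static uint64_t alloc_from_cluster(struct entry *entry, uint64_t bytes)
{
	uint64_t ret = 0;

	/* step 1: carve the space out of the cluster entry */
	pthread_mutex_lock(&cluster_lock);
	if (entry->bytes >= bytes) {
		entry->bytes -= bytes;
		ret = bytes;		/* stands in for the returned offset */
	}
	pthread_mutex_unlock(&cluster_lock);

	if (!ret)
		return 0;

	/* step 2: block-group-wide accounting under the other lock */
	pthread_mutex_lock(&tree_lock);
	free_space -= bytes;
	if (entry->bytes == 0) {
		free_extents--;
		free(entry);
	}
	pthread_mutex_unlock(&tree_lock);
	return ret;
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	e->bytes = 4096;
	printf("allocated %llu bytes\n",
	       (unsigned long long)alloc_from_cluster(e, 4096));
	printf("free_space=%llu free_extents=%llu\n",
	       (unsigned long long)free_space, (unsigned long long)free_extents);
	return 0;
}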
@@ -1557,6 +1557,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 out_page:
 	unlock_page(page);
 	page_cache_release(page);
+	kfree(fixup);
 }
 
 /*
@@ -1898,7 +1898,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.objectid = inode->i_ino;
-			new_key.offset = key.offset + destoff - off;
+			if (off <= key.offset)
+				new_key.offset = key.offset + destoff - off;
+			else
+				new_key.offset = destoff;
 
 			trans = btrfs_start_transaction(root, 1);
 			if (IS_ERR(trans)) {
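The new branch handles the case where an extent item begins before the requested source offset, typically the first item found for a clone. With the old single formula, key.offset + destoff - off underflows the unsigned offset whenever key.offset < off and destoff is small; the else branch pins such an extent to destoff instead. The standalone sketch below reproduces the arithmetic with made-up numbers; it is not part of the commit.

#include <stdio.h>
#include <stdint.h>

static uint64_t new_offset(uint64_t key_offset, uint64_t off, uint64_t destoff)
{
	if (off <= key_offset)
		return key_offset + destoff - off;	/* extent starts inside the cloned range */
	return destoff;					/* extent straddles the range start */
}

int main(void)
{
	/* hypothetical clone: source range starts at 8K, destination offset 0,
	 * and the first extent item is keyed at 4K (it straddles the start) */
	uint64_t key_offset = 4096, off = 8192, destoff = 0;

	printf("old formula: %llu\n",
	       (unsigned long long)(key_offset + destoff - off));	/* wraps to a huge value */
	printf("new formula: %llu\n",
	       (unsigned long long)new_offset(key_offset, off, destoff));	/* 0 */
	return 0;
}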
@@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 		struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *p;
+	char *opts, *orig, *p;
 	int error = 0;
 	int intarg;
 
@@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	opts = kstrdup(options, GFP_KERNEL);
 	if (!opts)
 		return -ENOMEM;
+	orig = opts;
 
 	while ((p = strsep(&opts, ",")) != NULL) {
 		int token;
@@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	}
 
  out_free_opts:
-	kfree(opts);
+	kfree(orig);
 out:
 	/*
 	 * If no subvolume name is specified we use the default one. Allocate
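The orig pointer exists because strsep() consumes the string it is handed: each call advances opts, and once the option list is exhausted opts is NULL, so the old kfree(opts) quietly freed nothing and leaked the kstrdup() buffer. The pattern is easiest to see in plain userspace C; the sketch below uses strdup()/free() in place of kstrdup()/kfree() and an invented option string.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse(const char *options)
{
	char *opts, *orig, *p;

	opts = strdup(options);		/* kstrdup() in the kernel code */
	if (!opts)
		return;
	orig = opts;			/* remember the start of the allocation */

	while ((p = strsep(&opts, ",")) != NULL)
		printf("option: %s\n", p);

	/* opts is NULL here; freeing it would leak, so free the saved pointer */
	free(orig);
}

int main(void)
{
	parse("subvol=root,device=/dev/sdb,degraded");
	return 0;
}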
@@ -760,6 +761,8 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
 		}
 
 		btrfs_close_devices(fs_devices);
+		kfree(fs_info);
+		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];