Commit cb5520f0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (33 commits)
  Btrfs: Fix page count calculation
  btrfs: Drop __exit attribute on btrfs_exit_compress
  btrfs: cleanup error handling in btrfs_unlink_inode()
  Btrfs: exclude super blocks when we read in block groups
  Btrfs: make sure search_bitmap finds something in remove_from_bitmap
  btrfs: fix return value check of btrfs_start_transaction()
  btrfs: checking NULL or not in some functions
  Btrfs: avoid uninit variable warnings in ordered-data.c
  Btrfs: catch errors from btrfs_sync_log
  Btrfs: make shrink_delalloc a little friendlier
  Btrfs: handle no memory properly in prepare_pages
  Btrfs: do error checking in btrfs_del_csums
  Btrfs: use the global block reserve if we cannot reserve space
  Btrfs: do not release more reserved bytes to the global_block_rsv than we need
  Btrfs: fix check_path_shared so it returns the right value
  btrfs: check return value of btrfs_start_ioctl_transaction() properly
  btrfs: fix return value check of btrfs_join_transaction()
  fs/btrfs/inode.c: Add missing IS_ERR test
  btrfs: fix missing break in switch phrase
  btrfs: fix several uncheck memory allocations
  ...
parents eee4da2c 3a90983d
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
     char *value = NULL;
     struct posix_acl *acl;
+    if (!IS_POSIXACL(inode))
+        return NULL;
     acl = get_cached_acl(inode, type);
     if (acl != ACL_NOT_CACHED)
         return acl;
@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
     struct posix_acl *acl;
     int ret = 0;
+    if (!IS_POSIXACL(dentry->d_inode))
+        return -EOPNOTSUPP;
     acl = btrfs_get_acl(dentry->d_inode, type);
     if (IS_ERR(acl))
...
@@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
     u64 em_len;
     u64 em_start;
     struct extent_map *em;
-    int ret;
+    int ret = -ENOMEM;
     u32 *sums;
     tree = &BTRFS_I(inode)->io_tree;
@@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
     compressed_len = em->block_len;
     cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+    if (!cb)
+        goto out;
     atomic_set(&cb->pending_bios, 0);
     cb->errors = 0;
     cb->inode = inode;
@@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
     nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
             PAGE_CACHE_SIZE;
-    cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+    cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
             GFP_NOFS);
+    if (!cb->compressed_pages)
+        goto fail1;
     bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
     for (page_index = 0; page_index < nr_pages; page_index++) {
         cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
                 __GFP_HIGHMEM);
+        if (!cb->compressed_pages[page_index])
+            goto fail2;
     }
     cb->nr_pages = nr_pages;
@@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
     cb->len = uncompressed_len;
     comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+    if (!comp_bio)
+        goto fail2;
     comp_bio->bi_private = cb;
     comp_bio->bi_end_io = end_compressed_bio_read;
     atomic_inc(&cb->pending_bios);
@@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
     bio_put(comp_bio);
     return 0;
+fail2:
+    for (page_index = 0; page_index < nr_pages; page_index++)
+        free_page((unsigned long)cb->compressed_pages[page_index]);
+    kfree(cb->compressed_pages);
+fail1:
+    kfree(cb);
+out:
+    free_extent_map(em);
+    return ret;
 }
 static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
@@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
     return ret;
 }
-void __exit btrfs_exit_compress(void)
+void btrfs_exit_compress(void)
 {
     free_workspaces();
 }
...
@@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg)
     spin_unlock(&root->fs_info->new_trans_lock);
     trans = btrfs_join_transaction(root, 1);
+    BUG_ON(IS_ERR(trans));
     if (transid == trans->transid) {
         ret = btrfs_commit_transaction(trans, root);
         BUG_ON(ret);
@@ -2453,10 +2454,14 @@ int btrfs_commit_super(struct btrfs_root *root)
     up_write(&root->fs_info->cleanup_work_sem);
     trans = btrfs_join_transaction(root, 1);
+    if (IS_ERR(trans))
+        return PTR_ERR(trans);
     ret = btrfs_commit_transaction(trans, root);
     BUG_ON(ret);
     /* run commit again to drop the original snapshot */
     trans = btrfs_join_transaction(root, 1);
+    if (IS_ERR(trans))
+        return PTR_ERR(trans);
     btrfs_commit_transaction(trans, root);
     ret = btrfs_write_and_wait_transaction(NULL, root);
     BUG_ON(ret);
@@ -2554,6 +2559,8 @@ int close_ctree(struct btrfs_root *root)
     kfree(fs_info->chunk_root);
     kfree(fs_info->dev_root);
     kfree(fs_info->csum_root);
+    kfree(fs_info);
     return 0;
 }
...
@@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
     int ret;
     path = btrfs_alloc_path();
+    if (!path)
+        return ERR_PTR(-ENOMEM);
     if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
         key.objectid = root->root_key.objectid;
...
@@ -320,11 +320,6 @@ static int caching_kthread(void *data)
     if (!path)
         return -ENOMEM;
-    exclude_super_stripes(extent_root, block_group);
-    spin_lock(&block_group->space_info->lock);
-    block_group->space_info->bytes_readonly += block_group->bytes_super;
-    spin_unlock(&block_group->space_info->lock);
     last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
     /*
@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
             cache->cached = BTRFS_CACHE_NO;
         }
         spin_unlock(&cache->lock);
-        if (ret == 1)
+        if (ret == 1) {
+            free_excluded_extents(fs_info->extent_root, cache);
             return 0;
+        }
     }
     if (load_cache_only)
@@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
     u64 reserved;
     u64 max_reclaim;
     u64 reclaimed = 0;
+    long time_left;
     int pause = 1;
     int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+    int loops = 0;
     block_rsv = &root->fs_info->delalloc_block_rsv;
     space_info = block_rsv->space_info;
@@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
     max_reclaim = min(reserved, to_reclaim);
-    while (1) {
+    while (loops < 1024) {
         /* have the flusher threads jump in and do some IO */
         smp_mb();
         nr_pages = min_t(unsigned long, nr_pages,
@@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
         writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
         spin_lock(&space_info->lock);
-        if (reserved > space_info->bytes_reserved)
+        if (reserved > space_info->bytes_reserved) {
+            loops = 0;
             reclaimed += reserved - space_info->bytes_reserved;
+        } else {
+            loops++;
+        }
         reserved = space_info->bytes_reserved;
         spin_unlock(&space_info->lock);
@@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
             return -EAGAIN;
         __set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(pause);
+        time_left = schedule_timeout(pause);
+        /* We were interrupted, exit */
+        if (time_left)
+            break;
         pause <<= 1;
         if (pause > HZ / 10)
             pause = HZ / 10;
@@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
     if (num_bytes > 0) {
         if (dest) {
-            block_rsv_add_bytes(dest, num_bytes, 0);
-        } else {
+            spin_lock(&dest->lock);
+            if (!dest->full) {
+                u64 bytes_to_add;
+                bytes_to_add = dest->size - dest->reserved;
+                bytes_to_add = min(num_bytes, bytes_to_add);
+                dest->reserved += bytes_to_add;
+                if (dest->reserved >= dest->size)
+                    dest->full = 1;
+                num_bytes -= bytes_to_add;
+            }
+            spin_unlock(&dest->lock);
+        }
+        if (num_bytes) {
             spin_lock(&space_info->lock);
             space_info->bytes_reserved -= num_bytes;
             spin_unlock(&space_info->lock);
@@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
     num_bytes = ALIGN(num_bytes, root->sectorsize);
     atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+    WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
     spin_lock(&BTRFS_I(inode)->accounting_lock);
     nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
             struct btrfs_root *root, u32 blocksize)
 {
     struct btrfs_block_rsv *block_rsv;
+    struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
     int ret;
     block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
     if (block_rsv->size == 0) {
         ret = reserve_metadata_bytes(trans, root, block_rsv,
                 blocksize, 0);
-        if (ret)
+        /*
+         * If we couldn't reserve metadata bytes try and use some from
+         * the global reserve.
+         */
+        if (ret && block_rsv != global_rsv) {
+            ret = block_rsv_use_bytes(global_rsv, blocksize);
+            if (!ret)
+                return global_rsv;
+            return ERR_PTR(ret);
+        } else if (ret) {
             return ERR_PTR(ret);
+        }
         return block_rsv;
     }
     ret = block_rsv_use_bytes(block_rsv, blocksize);
     if (!ret)
         return block_rsv;
+    if (ret) {
+        WARN_ON(1);
+        ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+                0);
+        if (!ret) {
+            spin_lock(&block_rsv->lock);
+            block_rsv->size += blocksize;
+            spin_unlock(&block_rsv->lock);
+            return block_rsv;
+        } else if (ret && block_rsv != global_rsv) {
+            ret = block_rsv_use_bytes(global_rsv, blocksize);
+            if (!ret)
+                return global_rsv;
+        }
+    }
     return ERR_PTR(-ENOSPC);
 }
@@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
     BUG_ON(!wc);
     trans = btrfs_start_transaction(tree_root, 0);
+    BUG_ON(IS_ERR(trans));
     if (block_rsv)
         trans->block_rsv = block_rsv;
@@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
             btrfs_end_transaction_throttle(trans, tree_root);
             trans = btrfs_start_transaction(tree_root, 0);
+            BUG_ON(IS_ERR(trans));
             if (block_rsv)
                 trans->block_rsv = block_rsv;
         }
@@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
     int ret = 0;
     ra = kzalloc(sizeof(*ra), GFP_NOFS);
+    if (!ra)
+        return -ENOMEM;
     mutex_lock(&inode->i_mutex);
     first_index = start >> PAGE_CACHE_SHIFT;
@@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
     BUG_ON(reloc_root->commit_root != NULL);
     while (1) {
         trans = btrfs_join_transaction(root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
         mutex_lock(&root->fs_info->drop_mutex);
         ret = btrfs_drop_snapshot(trans, reloc_root);
@@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
     if (found) {
         trans = btrfs_start_transaction(root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
         ret = btrfs_commit_transaction(trans, root);
         BUG_ON(ret);
     }
@@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
     trans = btrfs_start_transaction(extent_root, 1);
-    BUG_ON(!trans);
+    BUG_ON(IS_ERR(trans));
     if (extent_key->objectid == 0) {
         ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -8270,6 +8322,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
         if (block_group->cached == BTRFS_CACHE_STARTED)
             wait_block_group_cache_done(block_group);
+        /*
+         * We haven't cached this block group, which means we could
+         * possibly have excluded extents on this block group.
+         */
+        if (block_group->cached == BTRFS_CACHE_NO)
+            free_excluded_extents(info->extent_root, block_group);
         btrfs_remove_free_space_cache(block_group);
         btrfs_put_block_group(block_group);
@@ -8384,6 +8443,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
         cache->flags = btrfs_block_group_flags(&cache->item);
         cache->sectorsize = root->sectorsize;
+        /*
+         * We need to exclude the super stripes now so that the space
+         * info has super bytes accounted for, otherwise we'll think
+         * we have more space than we actually do.
+         */
+        exclude_super_stripes(root, cache);
         /*
          * check for two cases, either we are full, and therefore
          * don't need to bother with the caching work since we won't
@@ -8392,12 +8458,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
          * time, particularly in the full case.
          */
         if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-            exclude_super_stripes(root, cache);
             cache->last_byte_to_unpin = (u64)-1;
             cache->cached = BTRFS_CACHE_FINISHED;
             free_excluded_extents(root, cache);
         } else if (btrfs_block_group_used(&cache->item) == 0) {
-            exclude_super_stripes(root, cache);
             cache->last_byte_to_unpin = (u64)-1;
             cache->cached = BTRFS_CACHE_FINISHED;
             add_new_free_space(cache, root->fs_info,
...
@@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
     bio_get(bio);
     if (tree->ops && tree->ops->submit_bio_hook)
-        tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+        ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
                 mirror_num, bio_flags, start);
     else
         submit_bio(rw, bio);
@@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
     nr = bio_get_nr_vecs(bdev);
     bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+    if (!bio)
+        return -ENOMEM;
     bio_add_page(bio, page, page_size, offset);
     bio->bi_end_io = end_io_func;
@@ -2126,7 +2128,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
     ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
             &bio_flags);
     if (bio)
-        submit_one_bio(READ, bio, 0, bio_flags);
+        ret = submit_one_bio(READ, bio, 0, bio_flags);
     return ret;
 }
...
@@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
     root = root->fs_info->csum_root;
     path = btrfs_alloc_path();
+    if (!path)
+        return -ENOMEM;
     while (1) {
         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
             if (path->slots[0] == 0)
                 goto out;
             path->slots[0]--;
+        } else if (ret < 0) {
+            goto out;
         }
         leaf = path->nodes[0];
         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
...
@@ -793,8 +793,12 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
     for (i = 0; i < num_pages; i++) {
         pages[i] = grab_cache_page(inode->i_mapping, index + i);
         if (!pages[i]) {
-            err = -ENOMEM;
-            BUG_ON(1);
+            int c;
+            for (c = i - 1; c >= 0; c--) {
+                unlock_page(pages[c]);
+                page_cache_release(pages[c]);
+            }
+            return -ENOMEM;
         }
         wait_on_page_writeback(pages[i]);
     }
@@ -946,6 +950,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
             PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
             (sizeof(struct page *)));
     pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+    if (!pages) {
+        ret = -ENOMEM;
+        goto out;
+    }
     /* generic_write_checks can change our pos */
     start_pos = pos;
@@ -984,8 +992,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
         size_t write_bytes = min(iov_iter_count(&i),
                 nrptrs * (size_t)PAGE_CACHE_SIZE -
                 offset);
-        size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
-                PAGE_CACHE_SHIFT;
+        size_t num_pages = (write_bytes + offset +
+                PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
         WARN_ON(num_pages > nrptrs);
         memset(pages, 0, sizeof(struct page *) * nrptrs);
@@ -1015,8 +1023,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
         copied = btrfs_copy_from_user(pos, num_pages,
                 write_bytes, pages, &i);
-        dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
+        dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
                 PAGE_CACHE_SHIFT;
         if (num_pages > dirty_pages) {
             if (copied > 0)
...
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
     return entry;
 }
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-        struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+        struct btrfs_free_space *info)
 {
     rb_erase(&info->offset_index, &block_group->free_space_offset);
     block_group->free_extents--;
+}
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+        struct btrfs_free_space *info)
+{
+    __unlink_free_space(block_group, info);
     block_group->free_space -= info->bytes;
 }
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
     u64 max_bytes;
     u64 bitmap_bytes;
     u64 extent_bytes;
+    u64 size = block_group->key.offset;
     /*
      * The goal is to keep the total amount of memory used per 1gb of space
      * at or below 32k, so we need to adjust how much memory we allow to be
      * used by extent based free space tracking
      */
-    max_bytes = MAX_CACHE_BYTES_PER_GIG *
-        (div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+    if (size < 1024 * 1024 * 1024)
+        max_bytes = MAX_CACHE_BYTES_PER_GIG;
+    else
+        max_bytes = MAX_CACHE_BYTES_PER_GIG *
+            div64_u64(size, 1024 * 1024 * 1024);
     /*
      * we want to account for 1 more bitmap than what we have so we can make
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
     recalculate_thresholds(block_group);
 }
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+        struct btrfs_free_space *bitmap_info)
+{
+    unlink_free_space(block_group, bitmap_info);
+    kfree(bitmap_info->bitmap);
+    kfree(bitmap_info);
+    block_group->total_bitmaps--;
+    recalculate_thresholds(block_group);
+}
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
         struct btrfs_free_space *bitmap_info,
         u64 *offset, u64 *bytes)
@@ -1195,6 +1216,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
      */
     search_start = *offset;
     search_bytes = *bytes;
+    search_bytes = min(search_bytes, end - search_start + 1);
     ret = search_bitmap(block_group, bitmap_info, &search_start,
             &search_bytes);
     BUG_ON(ret < 0 || search_start != *offset);
@@ -1211,13 +1233,8 @@ static noinline int remove_from_bitmap(struct btrfs_block_gro
     if (*bytes) {
         struct rb_node *next = rb_next(&bitmap_info->offset_index);
-        if (!bitmap_info->bytes) {
-            unlink_free_space(block_group, bitmap_info);
-            kfree(bitmap_info->bitmap);
-            kfree(bitmap_info);
-            block_group->total_bitmaps--;
-            recalculate_thresholds(block_group);
-        }
+        if (!bitmap_info->bytes)
+            free_bitmap(block_group, bitmap_info);
         /*
          * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@ static noinline int remove_from_bitmap(struct btrfs_block_gro
             return -EAGAIN;
         goto again;
-    } else if (!bitmap_info->bytes) {
-        unlink_free_space(block_group, bitmap_info);
-        kfree(bitmap_info->bitmap);
-        kfree(bitmap_info);
-        block_group->total_bitmaps--;
-        recalculate_thresholds(block_group);
-    }
+    } else if (!bitmap_info->bytes)
+        free_bitmap(block_group, bitmap_info);
     return 0;
 }
@@ -1359,22 +1371,14 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
     return ret;
 }
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-        u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+        struct btrfs_free_space *info, bool update_stat)
 {
-    struct btrfs_free_space *right_info = NULL;
-    struct btrfs_free_space *left_info = NULL;
-    struct btrfs_free_space *info = NULL;
-    int ret = 0;
-    info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-    if (!info)
-        return -ENOMEM;
-    info->offset = offset;
-    info->bytes = bytes;
-    spin_lock(&block_group->tree_lock);
+    struct btrfs_free_space *left_info;
+    struct btrfs_free_space *right_info;
+    bool merged = false;
+    u64 offset = info->offset;
+    u64 bytes = info->bytes;
     /*
      * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
     else
         left_info = tree_search_offset(block_group, offset - 1, 0, 0);
-    /*
-     * If there was no extent directly to the left or right of this new
-     * extent then we know we're going to have to allocate a new extent, so
-     * before we do that see if we need to drop this into a bitmap
-     */
-    if ((!left_info || left_info->bitmap) &&
-        (!right_info || right_info->bitmap)) {
-        ret = insert_into_bitmap(block_group, info);
-        if (ret < 0) {
-            goto out;
-        } else if (ret) {
-            ret = 0;
-            goto out;
-        }
-    }
     if (right_info && !right_info->bitmap) {
-        unlink_free_space(block_group, right_info);
+        if (update_stat)
+            unlink_free_space(block_group, right_info);
+        else
+            __unlink_free_space(block_group, right_info);
         info->bytes += right_info->bytes;
         kfree(right_info);
+        merged = true;
     }
     if (left_info && !left_info->bitmap &&
         left_info->offset + left_info->bytes == offset) {
-        unlink_free_space(block_group, left_info);
+        if (update_stat)
+            unlink_free_space(block_group, left_info);
+        else
+            __unlink_free_space(block_group, left_info);
         info->offset = left_info->offset;
         info->bytes += left_info->bytes;
         kfree(left_info);
+        merged = true;
     }
+    return merged;
+}
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+        u64 offset, u64 bytes)
+{
+    struct btrfs_free_space *info;
+    int ret = 0;
+    info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+    if (!info)
+        return -ENOMEM;
+    info->offset = offset;
+    info->bytes = bytes;
+    spin_lock(&block_group->tree_lock);
+    if (try_merge_free_space(block_group, info, true))
+        goto link;
+    /*
+     * There was no extent directly to the left or right of this new
+     * extent then we know we're going to have to allocate a new extent, so
+     * before we do that see if we need to drop this into a bitmap
+     */
+    ret = insert_into_bitmap(block_group, info);
+    if (ret < 0) {
+        goto out;
+    } else if (ret) {
+        ret = 0;
+        goto out;
+    }
+link:
     ret = link_free_space(block_group, info);
     if (ret)
         kfree(info);
@@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space(
         node = rb_next(&entry->offset_index);
         rb_erase(&entry->offset_index, &cluster->root);
         BUG_ON(entry->bitmap);
+        try_merge_free_space(block_group, entry, false);
         tree_insert_offset(&block_group->free_space_offset,
                 entry->offset, &entry->offset_index, 0);
     }
@@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
     ret = offset;
     if (entry->bitmap) {
         bitmap_clear_bits(block_group, entry, offset, bytes);
-        if (!entry->bytes) {
-            unlink_free_space(block_group, entry);
-            kfree(entry->bitmap);
-            kfree(entry);
-            block_group->total_bitmaps--;
-            recalculate_thresholds(block_group);
-        }
+        if (!entry->bytes)
+            free_bitmap(block_group, entry);
     } else {
         unlink_free_space(block_group, entry);
         entry->offset += bytes;
@@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
     ret = search_start;
     bitmap_clear_bits(block_group, entry, ret, bytes);
+    if (entry->bytes == 0)
+        free_bitmap(block_group, entry);
 out:
     spin_unlock(&cluster->lock);
     spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
         entry->offset += bytes;
         entry->bytes -= bytes;
-        if (entry->bytes == 0) {
+        if (entry->bytes == 0)
             rb_erase(&entry->offset_index, &cluster->root);
-            kfree(entry);
-        }
         break;
     }
 out:
     spin_unlock(&cluster->lock);
+    if (!ret)
+        return 0;
+    spin_lock(&block_group->tree_lock);
+    block_group->free_space -= bytes;
+    if (entry->bytes == 0) {
+        block_group->free_extents--;
+        kfree(entry);
+    }
+    spin_unlock(&block_group->tree_lock);
     return ret;
 }
...
@@ -416,7 +416,7 @@ static noinline int compress_file_range(struct inode *inode,
     }
     if (start == 0) {
         trans = btrfs_join_transaction(root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
         btrfs_set_trans_block_group(trans, inode);
         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@@ -612,6 +612,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
                 GFP_NOFS);
         trans = btrfs_join_transaction(root, 1);
+        BUG_ON(IS_ERR(trans));
         ret = btrfs_reserve_extent(trans, root,
                 async_extent->compressed_size,
                 async_extent->compressed_size,
@@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode,
     BUG_ON(root == root->fs_info->tree_root);
     trans = btrfs_join_transaction(root, 1);
-    BUG_ON(!trans);
+    BUG_ON(IS_ERR(trans));
     btrfs_set_trans_block_group(trans, inode);
     trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
     } else {
         trans = btrfs_join_transaction(root, 1);
     }
-    BUG_ON(!trans);
+    BUG_ON(IS_ERR(trans));
     cow_start = (u64)-1;
     cur_offset = start;
@@ -1557,6 +1558,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 out_page:
     unlock_page(page);
     page_cache_release(page);
+    kfree(fixup);
 }
 /*
@@ -1703,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
             trans = btrfs_join_transaction_nolock(root, 1);
         else
             trans = btrfs_join_transaction(root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
         btrfs_set_trans_block_group(trans, inode);
         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
         ret = btrfs_update_inode(trans, root, inode);
@@ -1720,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
         trans = btrfs_join_transaction_nolock(root, 1);
     else
         trans = btrfs_join_transaction(root, 1);
+    BUG_ON(IS_ERR(trans));
     btrfs_set_trans_block_group(trans, inode);
     trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@@ -2354,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
         */
         if (is_bad_inode(inode)) {
             trans = btrfs_start_transaction(root, 0);
+            BUG_ON(IS_ERR(trans));
             btrfs_orphan_del(trans, inode);
             btrfs_end_transaction(trans, root);
             iput(inode);
@@ -2381,6 +2385,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
     if (root->orphan_block_rsv || root->orphan_item_inserted) {
         trans = btrfs_join_transaction(root, 1);
+        BUG_ON(IS_ERR(trans));
         btrfs_end_transaction(trans, root);
     }
@@ -2641,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
     path = btrfs_alloc_path();
     if (!path) {
         ret = -ENOMEM;
-        goto err;
+        goto out;
     }
     path->leave_spinning = 1;
@@ -2714,9 +2719,10 @@ static int check_path_shared(struct btrfs_root *root,
     struct extent_buffer *eb;
     int level;
     u64 refs = 1;
-    int uninitialized_var(ret);
     for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+        int ret;
         if (!path->nodes[level])
             break;
         eb = path->nodes[level];
@@ -2727,7 +2733,7 @@ static int check_path_shared(struct btrfs_root *root,
         if (refs > 1)
             return 1;
     }
-    return ret; /* XXX callers? */
+    return 0;
 }
 /*
@@ -4134,7 +4140,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
     }
     srcu_read_unlock(&root->fs_info->subvol_srcu, index);
-    if (root != sub_root) {
+    if (!IS_ERR(inode) && root != sub_root) {
         down_read(&root->fs_info->cleanup_work_sem);
         if (!(inode->i_sb->s_flags & MS_RDONLY))
             btrfs_orphan_cleanup(sub_root);
@@ -4347,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
         trans = btrfs_join_transaction_nolock(root, 1);
     else
         trans = btrfs_join_transaction(root, 1);
+    if (IS_ERR(trans))
+        return PTR_ERR(trans);
     btrfs_set_trans_block_group(trans, inode);
     if (nolock)
         ret = btrfs_end_transaction_nolock(trans, root);
@@ -4372,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode)
         return;
     trans = btrfs_join_transaction(root, 1);
+    BUG_ON(IS_ERR(trans));
     btrfs_set_trans_block_group(trans, inode);
     ret = btrfs_update_inode(trans, root, inode);
@@ -5176,6 +5185,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
             em = NULL;
             btrfs_release_path(root, path);
             trans = btrfs_join_transaction(root, 1);
+            if (IS_ERR(trans))
+                return ERR_CAST(trans);
             goto again;
         }
         map = kmap(page);
@@ -5280,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
     btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
     trans = btrfs_join_transaction(root, 0);
-    if (!trans)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(trans))
+        return ERR_CAST(trans);
     trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@@ -5505,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
          * while we look for nocow cross refs
          */
        trans = btrfs_join_transaction(root, 0);
-        if (!trans)
+        if (IS_ERR(trans))
            goto must_cow;
        if (can_nocow_odirect(trans, inode, start, len) == 1) {
@@ -5640,7 +5651,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
     BUG_ON(!ordered);
     trans = btrfs_join_transaction(root, 1);
-    if (!trans) {
+    if (IS_ERR(trans)) {
         err = -ENOMEM;
         goto out;
     }
...
@@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
     trans = btrfs_join_transaction(root, 1);
-    BUG_ON(!trans);
+    BUG_ON(IS_ERR(trans));
     ret = btrfs_update_inode(trans, root, inode);
     BUG_ON(ret);
@@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
     if (new_size > old_size) {
         trans = btrfs_start_transaction(root, 0);
+        if (IS_ERR(trans)) {
+            ret = PTR_ERR(trans);
+            goto out_unlock;
+        }
         ret = btrfs_grow_device(trans, device, new_size);
         btrfs_commit_transaction(trans, root);
     } else {
@@ -1898,7 +1902,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
             memcpy(&new_key, &key, sizeof(new_key));
             new_key.objectid = inode->i_ino;
-            new_key.offset = key.offset + destoff - off;
+            if (off <= key.offset)
+                new_key.offset = key.offset + destoff - off;
+            else
+                new_key.offset = destoff;
             trans = btrfs_start_transaction(root, 1);
             if (IS_ERR(trans)) {
@@ -2082,7 +2089,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
     ret = -ENOMEM;
     trans = btrfs_start_ioctl_transaction(root, 0);
-    if (!trans)
+    if (IS_ERR(trans))
         goto out_drop;
     file->private_data = trans;
@@ -2138,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
     path->leave_spinning = 1;
     trans = btrfs_start_transaction(root, 1);
-    if (!trans) {
+    if (IS_ERR(trans)) {
         btrfs_free_path(path);
-        return -ENOMEM;
+        return PTR_ERR(trans);
     }
     dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@@ -2334,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
     u64 transid;
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans))
+        return PTR_ERR(trans);
     transid = trans->transid;
     btrfs_commit_transaction_async(trans, root, 0);
...
@@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
             u64 file_offset)
 {
     struct rb_root *root = &tree->tree;
-    struct rb_node *prev;
+    struct rb_node *prev = NULL;
     struct rb_node *ret;
     struct btrfs_ordered_extent *entry;
...
@@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 #else
             BUG();
 #endif
+            break;
         case BTRFS_BLOCK_GROUP_ITEM_KEY:
             bi = btrfs_item_ptr(l, i,
                     struct btrfs_block_group_item);
...
@@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
     while (1) {
         trans = btrfs_start_transaction(root, 0);
+        BUG_ON(IS_ERR(trans));
         trans->block_rsv = rc->block_rsv;
         ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2147,6 +2148,12 @@ int prepare_to_merge(struct reloc_control *rc, int err)
     }
     trans = btrfs_join_transaction(rc->extent_root, 1);
+    if (IS_ERR(trans)) {
+        if (!err)
+            btrfs_block_rsv_release(rc->extent_root,
+                    rc->block_rsv, num_bytes);
+        return PTR_ERR(trans);
+    }
     if (!err) {
         if (num_bytes != rc->merging_rsv_size) {
@@ -3222,6 +3229,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
     trans = btrfs_join_transaction(root, 0);
     if (IS_ERR(trans)) {
         btrfs_free_path(path);
+        ret = PTR_ERR(trans);
         goto out;
     }
@@ -3628,6 +3636,7 @@ int prepare_to_relocate(struct reloc_control *rc)
     set_reloc_control(rc);
     trans = btrfs_join_transaction(rc->extent_root, 1);
+    BUG_ON(IS_ERR(trans));
     btrfs_commit_transaction(trans, rc->extent_root);
     return 0;
 }
@@ -3657,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
     while (1) {
         trans = btrfs_start_transaction(rc->extent_root, 0);
+        BUG_ON(IS_ERR(trans));
         if (update_backref_cache(trans, &rc->backref_cache)) {
             btrfs_end_transaction(trans, rc->extent_root);
@@ -3804,7 +3814,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
     /* get rid of pinned extents */
     trans = btrfs_join_transaction(rc->extent_root, 1);
-    btrfs_commit_transaction(trans, rc->extent_root);
+    if (IS_ERR(trans))
+        err = PTR_ERR(trans);
+    else
+        btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
     btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
     btrfs_free_path(path);
@@ -4022,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
     int ret;
     trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+    BUG_ON(IS_ERR(trans));
     memset(&root->root_item.drop_progress, 0,
             sizeof(root->root_item.drop_progress));
@@ -4125,6 +4139,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
     set_reloc_control(rc);
     trans = btrfs_join_transaction(rc->extent_root, 1);
+    if (IS_ERR(trans)) {
+        unset_reloc_control(rc);
+        err = PTR_ERR(trans);
+        goto out_free;
+    }
     rc->merge_reloc_tree = 1;
@@ -4154,9 +4173,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
     unset_reloc_control(rc);
     trans = btrfs_join_transaction(rc->extent_root, 1);
-    btrfs_commit_transaction(trans, rc->extent_root);
-out:
+    if (IS_ERR(trans))
+        err = PTR_ERR(trans);
+    else
+        btrfs_commit_transaction(trans, rc->extent_root);
+out_free:
     kfree(rc);
+out:
     while (!list_empty(&reloc_roots)) {
         reloc_root = list_entry(reloc_roots.next,
                 struct btrfs_root, root_list);
...
@@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
             struct btrfs_fs_devices **fs_devices)
 {
     substring_t args[MAX_OPT_ARGS];
-    char *opts, *p;
+    char *opts, *orig, *p;
     int error = 0;
     int intarg;
@@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
     opts = kstrdup(options, GFP_KERNEL);
     if (!opts)
         return -ENOMEM;
+    orig = opts;
     while ((p = strsep(&opts, ",")) != NULL) {
         int token;
@@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
     }
 out_free_opts:
-    kfree(opts);
+    kfree(orig);
 out:
     /*
      * If no subvolume name is specified we use the default one. Allocate
@@ -623,6 +624,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
     btrfs_wait_ordered_extents(root, 0, 0);
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans))
+        return PTR_ERR(trans);
     ret = btrfs_commit_transaction(trans, root);
     return ret;
 }
@@ -761,6 +764,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
         }
         btrfs_close_devices(fs_devices);
+        kfree(fs_info);
+        kfree(tree_root);
     } else {
         char b[BDEVNAME_SIZE];
...
@@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
     INIT_DELAYED_WORK(&ac->work, do_async_commit);
     ac->root = root;
     ac->newtrans = btrfs_join_transaction(root, 0);
+    if (IS_ERR(ac->newtrans)) {
+        int err = PTR_ERR(ac->newtrans);
+        kfree(ac);
+        return err;
+    }
     /* take transaction reference */
     mutex_lock(&root->fs_info->trans_mutex);
...
@@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
     }
     dst_copy = kmalloc(item_size, GFP_NOFS);
     src_copy = kmalloc(item_size, GFP_NOFS);
+    if (!dst_copy || !src_copy) {
+        btrfs_release_path(root, path);
+        kfree(dst_copy);
+        kfree(src_copy);
+        return -ENOMEM;
+    }
     read_extent_buffer(eb, src_copy, src_ptr, item_size);
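Note: the error path added to overwrite_item() frees both buffers without checking which allocation failed; that works because kfree(NULL) is a no-op, just as free(NULL) is in standard C. A small self-contained sketch of the same shape; copy_item_twice() is an invented example, not btrfs code:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Invented example: make two copies of 'item', unwinding cleanly on failure. */
    static int copy_item_twice(const void *item, size_t item_size)
    {
        char *dst_copy = malloc(item_size);
        char *src_copy = malloc(item_size);

        if (!dst_copy || !src_copy) {
            free(dst_copy);     /* free(NULL) is defined to do nothing */
            free(src_copy);
            return -ENOMEM;
        }

        memcpy(src_copy, item, item_size);
        memcpy(dst_copy, src_copy, item_size);
        free(dst_copy);
        free(src_copy);
        return 0;
    }

    int main(void)
    {
        const char item[] = "example";
        return copy_item_twice(item, sizeof(item)) ? 1 : 0;
    }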
@@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
     btrfs_dir_item_key_to_cpu(leaf, di, &location);
     name_len = btrfs_dir_name_len(leaf, di);
     name = kmalloc(name_len, GFP_NOFS);
+    if (!name)
+        return -ENOMEM;
     read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
     btrfs_release_path(root, path);
@@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log,
     int match = 0;
     path = btrfs_alloc_path();
+    if (!path)
+        return -ENOMEM;
     ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
     if (ret != 0)
         goto out;
@@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
     key.offset = (u64)-1;
     path = btrfs_alloc_path();
+    if (!path)
+        return -ENOMEM;
     while (1) {
         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
     name_len = btrfs_dir_name_len(eb, di);
     name = kmalloc(name_len, GFP_NOFS);
+    if (!name)
+        return -ENOMEM;
     log_type = btrfs_dir_type(eb, di);
     read_extent_buffer(eb, name, (unsigned long)(di + 1),
                        name_len);
@@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
     root_owner = btrfs_header_owner(parent);
     next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+    if (!next)
+        return -ENOMEM;
     if (*level == 1) {
         wc->process_func(root, next, wc, ptr_gen);
@@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         wait_log_commit(trans, log_root_tree,
                         log_root_tree->log_transid);
         mutex_unlock(&log_root_tree->log_mutex);
+        ret = 0;
         goto out;
     }
     atomic_set(&log_root_tree->log_commit[index2], 1);
@@ -2096,7 +2116,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
     smp_mb();
     if (waitqueue_active(&root->log_commit_wait[index1]))
         wake_up(&root->log_commit_wait[index1]);
-    return 0;
+    return ret;
 }
 static void free_log_tree(struct btrfs_trans_handle *trans,
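Note: taken together, the two btrfs_sync_log() hunks change the function from always returning 0 to returning `ret`, so every path reaching the final return must leave `ret` well defined; hence the explicit `ret = 0` on the early path that merely waits for a log commit already in progress. A condensed user-space sketch of that control flow; the helpers are invented placeholders, not the real log-commit steps:

    #include <errno.h>
    #include <stdio.h>

    /* Invented placeholders for the real log-commit steps. */
    static int commit_already_running(int cond) { return cond; }
    static int do_commit(void) { return 0; }    /* could return -EIO on failure */

    static int sync_log_sketch(int other_commit_running)
    {
        int ret = -EAGAIN;      /* pretend 'ret' still holds an earlier value */

        if (commit_already_running(other_commit_running)) {
            ret = 0;            /* without this, the stale value leaks out */
            goto out;
        }

        ret = do_commit();
    out:
        return ret;             /* previously 'return 0;', which hid errors */
    }

    int main(void)
    {
        printf("%d %d\n", sync_log_sketch(1), sync_log_sketch(0));
        return 0;
    }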
@@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
     log = root->log_root;
     path = btrfs_alloc_path();
+    if (!path)
+        return -ENOMEM;
     di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
                                name, name_len, -1);
     if (IS_ERR(di)) {
@@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
     ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
                        nr * sizeof(u32), GFP_NOFS);
+    if (!ins_data)
+        return -ENOMEM;
     ins_sizes = (u32 *)ins_data;
     ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
@@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
     log = root->log_root;
     path = btrfs_alloc_path();
+    if (!path)
+        return -ENOMEM;
     dst_path = btrfs_alloc_path();
+    if (!dst_path) {
+        btrfs_free_path(path);
+        return -ENOMEM;
+    }
     min_key.objectid = inode->i_ino;
     min_key.type = BTRFS_INODE_ITEM_KEY;
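Note: in btrfs_log_inode() the second allocation failing must release the first before returning, otherwise the earlier success would leak. A generic user-space sketch of that unwind order; log_inode_sketch() and its buffers are illustrative only, and the kernel often writes the same thing with goto-based cleanup labels:

    #include <errno.h>
    #include <stdlib.h>

    /* Illustrative only: acquire two buffers, release the first if the second fails. */
    static int log_inode_sketch(size_t n)
    {
        char *path = malloc(n);
        char *dst_path;

        if (!path)
            return -ENOMEM;

        dst_path = malloc(n);
        if (!dst_path) {
            free(path);         /* undo the first allocation before bailing out */
            return -ENOMEM;
        }

        /* ... use both buffers ... */

        free(dst_path);
        free(path);
        return 0;
    }

    int main(void)
    {
        return log_inode_sketch(64) ? 1 : 0;
    }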
@@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
     BUG_ON(!path);
     trans = btrfs_start_transaction(fs_info->tree_root, 0);
+    BUG_ON(IS_ERR(trans));
     wc.trans = trans;
     wc.pin = 1;
...
@@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
         return -ENOMEM;
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans)) {
+        btrfs_free_path(path);
+        return PTR_ERR(trans);
+    }
     key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
     key.type = BTRFS_DEV_ITEM_KEY;
     key.offset = device->devid;
@@ -1606,6 +1610,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
     }
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans)) {
+        kfree(device);
+        ret = PTR_ERR(trans);
+        goto error;
+    }
     lock_chunks(root);
     device->writeable = 1;
@@ -1873,7 +1883,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
         return ret;
     trans = btrfs_start_transaction(root, 0);
-    BUG_ON(!trans);
+    BUG_ON(IS_ERR(trans));
     lock_chunks(root);
@@ -2047,7 +2057,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
         BUG_ON(ret);
         trans = btrfs_start_transaction(dev_root, 0);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
         ret = btrfs_grow_device(trans, device, old_size);
         BUG_ON(ret);
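Note: the two BUG_ON() conversions above are corrections rather than new assertions. A failed btrfs_start_transaction() hands back an ERR_PTR value, which is non-NULL, so BUG_ON(!trans) could never fire; BUG_ON(IS_ERR(trans)) tests the value that is actually returned. With the same simplified stand-in macros as in the earlier sketch:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *trans = ERR_PTR(-ENOSPC);     /* what a failed start looks like */

        printf("!trans        -> %d  (old check: never fires)\n", !trans);
        printf("IS_ERR(trans) -> %d  (new check: catches the failure)\n",
               IS_ERR(trans) ? 1 : 0);
        return 0;
    }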
@@ -2213,6 +2223,11 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
     /* Shrinking succeeded, else we would be at "done". */
     trans = btrfs_start_transaction(root, 0);
+    if (IS_ERR(trans)) {
+        ret = PTR_ERR(trans);
+        goto done;
+    }
     lock_chunks(root);
     device->disk_total_bytes = new_size;
...