Commit ed8f3737 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (31 commits)
  Btrfs: don't call writepages from within write_full_page
  Btrfs: Remove unused variable 'last_index' in file.c
  Btrfs: clean up for find_first_extent_bit()
  Btrfs: clean up for wait_extent_bit()
  Btrfs: clean up for insert_state()
  Btrfs: remove unused members from struct extent_state
  Btrfs: clean up code for merging extent maps
  Btrfs: clean up code for extent_map lookup
  Btrfs: clean up search_extent_mapping()
  Btrfs: remove redundant code for dir item lookup
  Btrfs: make acl functions really no-op if acl is not enabled
  Btrfs: remove remaining ref-cache code
  Btrfs: remove a BUG_ON() in btrfs_commit_transaction()
  Btrfs: use wait_event()
  Btrfs: check the nodatasum flag when writing compressed files
  Btrfs: copy string correctly in INO_LOOKUP ioctl
  Btrfs: don't print the leaf if we had an error
  btrfs: make btrfs_set_root_node void
  Btrfs: fix oops while writing data to SSD partitions
  Btrfs: Protect the readonly flag of block group
  ...

Fix up trivial conflicts (due to acl and writeback cleanups) in
 - fs/btrfs/acl.c
 - fs/btrfs/ctree.h
 - fs/btrfs/extent_io.c
parents a6b11f53 0d10ee2e
...@@ -6,5 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ ...@@ -6,5 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \ transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
...@@ -28,8 +28,6 @@ ...@@ -28,8 +28,6 @@
#include "btrfs_inode.h" #include "btrfs_inode.h"
#include "xattr.h" #include "xattr.h"
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type) struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{ {
int size; int size;
...@@ -276,18 +274,3 @@ const struct xattr_handler btrfs_xattr_acl_access_handler = { ...@@ -276,18 +274,3 @@ const struct xattr_handler btrfs_xattr_acl_access_handler = {
.get = btrfs_xattr_acl_get, .get = btrfs_xattr_acl_get,
.set = btrfs_xattr_acl_set, .set = btrfs_xattr_acl_set,
}; };
#else /* CONFIG_BTRFS_FS_POSIX_ACL */
int btrfs_acl_chmod(struct inode *inode)
{
return 0;
}
int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir)
{
return 0;
}
#endif /* CONFIG_BTRFS_FS_POSIX_ACL */
...@@ -338,6 +338,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ...@@ -338,6 +338,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
u64 first_byte = disk_start; u64 first_byte = disk_start;
struct block_device *bdev; struct block_device *bdev;
int ret; int ret;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
...@@ -392,8 +393,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ...@@ -392,8 +393,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret); BUG_ON(ret);
ret = btrfs_csum_one_bio(root, inode, bio, start, 1); if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio,
start, 1);
BUG_ON(ret); BUG_ON(ret);
}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1); ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret); BUG_ON(ret);
...@@ -418,8 +422,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ...@@ -418,8 +422,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret); BUG_ON(ret);
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, start, 1); ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
BUG_ON(ret); BUG_ON(ret);
}
ret = btrfs_map_bio(root, WRITE, bio, 0, 1); ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
BUG_ON(ret); BUG_ON(ret);
......
...@@ -2406,7 +2406,7 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct ...@@ -2406,7 +2406,7 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key); btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item, void btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node); struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item); void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
...@@ -2523,6 +2523,14 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag ...@@ -2523,6 +2523,14 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
#define PageChecked PageFsMisc #define PageChecked PageFsMisc
#endif #endif
/* This forces readahead on a given range of bytes in an inode */
static inline void btrfs_force_ra(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
pgoff_t offset, unsigned long req_size)
{
page_cache_sync_readahead(mapping, ra, file, offset, req_size);
}
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index); int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans, int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
...@@ -2551,9 +2559,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, ...@@ -2551,9 +2559,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
int btrfs_merge_bio_hook(struct page *page, unsigned long offset, int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio, unsigned long bio_flags); size_t size, struct bio *bio, unsigned long bio_flags);
unsigned long btrfs_force_ra(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
pgoff_t offset, pgoff_t last_index);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page); int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode); void btrfs_evict_inode(struct inode *inode);
...@@ -2648,12 +2653,21 @@ do { \ ...@@ -2648,12 +2653,21 @@ do { \
/* acl.c */ /* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL #ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type); struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
#else
#define btrfs_get_acl NULL
#endif
int btrfs_init_acl(struct btrfs_trans_handle *trans, int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir); struct inode *inode, struct inode *dir);
int btrfs_acl_chmod(struct inode *inode); int btrfs_acl_chmod(struct inode *inode);
#else
#define btrfs_get_acl NULL
static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir)
{
return 0;
}
static inline int btrfs_acl_chmod(struct inode *inode)
{
return 0;
}
#endif
/* relocation.c */ /* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start); int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
......
...@@ -198,8 +198,6 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, ...@@ -198,8 +198,6 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
struct btrfs_key key; struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0; int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0; int cow = mod != 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
key.objectid = dir; key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
...@@ -209,18 +207,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, ...@@ -209,18 +207,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0) if (ret < 0)
return ERR_PTR(ret); return ERR_PTR(ret);
if (ret > 0) { if (ret > 0)
if (path->slots[0] == 0)
return NULL;
path->slots[0]--;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != dir ||
btrfs_key_type(&found_key) != BTRFS_DIR_ITEM_KEY ||
found_key.offset != key.offset)
return NULL; return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len); return btrfs_match_dir_item_name(root, path, name, name_len);
...@@ -315,8 +302,6 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, ...@@ -315,8 +302,6 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_key key; struct btrfs_key key;
int ins_len = mod < 0 ? -1 : 0; int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0; int cow = mod != 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
key.objectid = dir; key.objectid = dir;
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
...@@ -324,18 +309,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, ...@@ -324,18 +309,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0) if (ret < 0)
return ERR_PTR(ret); return ERR_PTR(ret);
if (ret > 0) { if (ret > 0)
if (path->slots[0] == 0)
return NULL;
path->slots[0]--;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
if (found_key.objectid != dir ||
btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY ||
found_key.offset != key.offset)
return NULL; return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len); return btrfs_match_dir_item_name(root, path, name, name_len);
......
...@@ -663,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) ...@@ -663,7 +663,9 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
struct btrfs_path *path; struct btrfs_path *path;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
key.objectid = start; key.objectid = start;
key.offset = len; key.offset = len;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
...@@ -3272,6 +3274,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, ...@@ -3272,6 +3274,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
} }
ret = btrfs_alloc_chunk(trans, extent_root, flags); ret = btrfs_alloc_chunk(trans, extent_root, flags);
if (ret < 0 && ret != -ENOSPC)
goto out;
spin_lock(&space_info->lock); spin_lock(&space_info->lock);
if (ret) if (ret)
space_info->full = 1; space_info->full = 1;
...@@ -3281,6 +3286,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, ...@@ -3281,6 +3286,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
space_info->chunk_alloc = 0; space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock); spin_unlock(&space_info->lock);
out:
mutex_unlock(&extent_root->fs_info->chunk_mutex); mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret; return ret;
} }
...@@ -4456,7 +4462,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, ...@@ -4456,7 +4462,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
printk(KERN_ERR "umm, got %d back from search" printk(KERN_ERR "umm, got %d back from search"
", was looking for %llu\n", ret, ", was looking for %llu\n", ret,
(unsigned long long)bytenr); (unsigned long long)bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]); if (ret > 0)
btrfs_print_leaf(extent_root,
path->nodes[0]);
} }
BUG_ON(ret); BUG_ON(ret);
extent_slot = path->slots[0]; extent_slot = path->slots[0];
...@@ -5073,7 +5081,9 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, ...@@ -5073,7 +5081,9 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
* group is does point to and try again * group is does point to and try again
*/ */
if (!last_ptr_loop && last_ptr->block_group && if (!last_ptr_loop && last_ptr->block_group &&
last_ptr->block_group != block_group) { last_ptr->block_group != block_group &&
index <=
get_block_group_index(last_ptr->block_group)) {
btrfs_put_block_group(block_group); btrfs_put_block_group(block_group);
block_group = last_ptr->block_group; block_group = last_ptr->block_group;
...@@ -5501,7 +5511,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, ...@@ -5501,7 +5511,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref); u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
path->leave_spinning = 1; path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
...@@ -6272,10 +6283,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, ...@@ -6272,10 +6283,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level; int level;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
wc = kzalloc(sizeof(*wc), GFP_NOFS); wc = kzalloc(sizeof(*wc), GFP_NOFS);
BUG_ON(!wc); if (!wc) {
btrfs_free_path(path);
return -ENOMEM;
}
trans = btrfs_start_transaction(tree_root, 0); trans = btrfs_start_transaction(tree_root, 0);
BUG_ON(IS_ERR(trans)); BUG_ON(IS_ERR(trans));
...@@ -6538,8 +6553,6 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) ...@@ -6538,8 +6553,6 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
u64 min_allocable_bytes; u64 min_allocable_bytes;
int ret = -ENOSPC; int ret = -ENOSPC;
if (cache->ro)
return 0;
/* /*
* We need some metadata space and system metadata space for * We need some metadata space and system metadata space for
...@@ -6555,6 +6568,12 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) ...@@ -6555,6 +6568,12 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
spin_lock(&sinfo->lock); spin_lock(&sinfo->lock);
spin_lock(&cache->lock); spin_lock(&cache->lock);
if (cache->ro) {
ret = 0;
goto out;
}
num_bytes = cache->key.offset - cache->reserved - cache->pinned - num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache->bytes_super - btrfs_block_group_used(&cache->item); cache->bytes_super - btrfs_block_group_used(&cache->item);
...@@ -6568,7 +6587,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force) ...@@ -6568,7 +6587,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
cache->ro = 1; cache->ro = 1;
ret = 0; ret = 0;
} }
out:
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
spin_unlock(&sinfo->lock); spin_unlock(&sinfo->lock);
return ret; return ret;
...@@ -7183,11 +7202,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ...@@ -7183,11 +7202,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cluster->refill_lock); spin_unlock(&cluster->refill_lock);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path) {
ret = -ENOMEM;
goto out;
}
inode = lookup_free_space_inode(root, block_group, path); inode = lookup_free_space_inode(root, block_group, path);
if (!IS_ERR(inode)) { if (!IS_ERR(inode)) {
btrfs_orphan_add(trans, inode); ret = btrfs_orphan_add(trans, inode);
BUG_ON(ret);
clear_nlink(inode); clear_nlink(inode);
/* One for the block groups ref */ /* One for the block groups ref */
spin_lock(&block_group->lock); spin_lock(&block_group->lock);
......
...@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new, ...@@ -254,14 +254,14 @@ static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
* *
* This should be called with the tree lock held. * This should be called with the tree lock held.
*/ */
static int merge_state(struct extent_io_tree *tree, static void merge_state(struct extent_io_tree *tree,
struct extent_state *state) struct extent_state *state)
{ {
struct extent_state *other; struct extent_state *other;
struct rb_node *other_node; struct rb_node *other_node;
if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
return 0; return;
other_node = rb_prev(&state->rb_node); other_node = rb_prev(&state->rb_node);
if (other_node) { if (other_node) {
...@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree, ...@@ -287,19 +287,13 @@ static int merge_state(struct extent_io_tree *tree,
free_extent_state(other); free_extent_state(other);
} }
} }
return 0;
} }
static int set_state_cb(struct extent_io_tree *tree, static void set_state_cb(struct extent_io_tree *tree,
struct extent_state *state, int *bits) struct extent_state *state, int *bits)
{ {
if (tree->ops && tree->ops->set_bit_hook) { if (tree->ops && tree->ops->set_bit_hook)
return tree->ops->set_bit_hook(tree->mapping->host, tree->ops->set_bit_hook(tree->mapping->host, state, bits);
state, bits);
}
return 0;
} }
static void clear_state_cb(struct extent_io_tree *tree, static void clear_state_cb(struct extent_io_tree *tree,
...@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree, ...@@ -309,6 +303,9 @@ static void clear_state_cb(struct extent_io_tree *tree,
tree->ops->clear_bit_hook(tree->mapping->host, state, bits); tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
} }
static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state, int *bits);
/* /*
* insert an extent_state struct into the tree. 'bits' are set on the * insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted. * struct before it is inserted.
...@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree, ...@@ -324,8 +321,6 @@ static int insert_state(struct extent_io_tree *tree,
int *bits) int *bits)
{ {
struct rb_node *node; struct rb_node *node;
int bits_to_set = *bits & ~EXTENT_CTLBITS;
int ret;
if (end < start) { if (end < start) {
printk(KERN_ERR "btrfs end < start %llu %llu\n", printk(KERN_ERR "btrfs end < start %llu %llu\n",
...@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree, ...@@ -335,13 +330,9 @@ static int insert_state(struct extent_io_tree *tree,
} }
state->start = start; state->start = start;
state->end = end; state->end = end;
ret = set_state_cb(tree, state, bits);
if (ret)
return ret;
if (bits_to_set & EXTENT_DIRTY) set_state_bits(tree, state, bits);
tree->dirty_bytes += end - start + 1;
state->state |= bits_to_set;
node = tree_insert(&tree->state, end, &state->rb_node); node = tree_insert(&tree->state, end, &state->rb_node);
if (node) { if (node) {
struct extent_state *found; struct extent_state *found;
...@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree, ...@@ -357,13 +348,11 @@ static int insert_state(struct extent_io_tree *tree,
return 0; return 0;
} }
static int split_cb(struct extent_io_tree *tree, struct extent_state *orig, static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
u64 split) u64 split)
{ {
if (tree->ops && tree->ops->split_extent_hook) if (tree->ops && tree->ops->split_extent_hook)
return tree->ops->split_extent_hook(tree->mapping->host, tree->ops->split_extent_hook(tree->mapping->host, orig, split);
orig, split);
return 0;
} }
/* /*
...@@ -659,34 +648,25 @@ int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) ...@@ -659,34 +648,25 @@ int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
if (start > end) if (start > end)
break; break;
if (need_resched()) { cond_resched_lock(&tree->lock);
spin_unlock(&tree->lock);
cond_resched();
spin_lock(&tree->lock);
}
} }
out: out:
spin_unlock(&tree->lock); spin_unlock(&tree->lock);
return 0; return 0;
} }
static int set_state_bits(struct extent_io_tree *tree, static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state, struct extent_state *state,
int *bits) int *bits)
{ {
int ret;
int bits_to_set = *bits & ~EXTENT_CTLBITS; int bits_to_set = *bits & ~EXTENT_CTLBITS;
ret = set_state_cb(tree, state, bits); set_state_cb(tree, state, bits);
if (ret)
return ret;
if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1; u64 range = state->end - state->start + 1;
tree->dirty_bytes += range; tree->dirty_bytes += range;
} }
state->state |= bits_to_set; state->state |= bits_to_set;
return 0;
} }
static void cache_state(struct extent_state *state, static void cache_state(struct extent_state *state,
...@@ -779,9 +759,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -779,9 +759,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
goto out; goto out;
} }
err = set_state_bits(tree, state, &bits); set_state_bits(tree, state, &bits);
if (err)
goto out;
cache_state(state, cached_state); cache_state(state, cached_state);
merge_state(tree, state); merge_state(tree, state);
...@@ -830,9 +808,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -830,9 +808,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
if (err) if (err)
goto out; goto out;
if (state->end <= end) { if (state->end <= end) {
err = set_state_bits(tree, state, &bits); set_state_bits(tree, state, &bits);
if (err)
goto out;
cache_state(state, cached_state); cache_state(state, cached_state);
merge_state(tree, state); merge_state(tree, state);
if (last_end == (u64)-1) if (last_end == (u64)-1)
...@@ -893,11 +869,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, ...@@ -893,11 +869,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
err = split_state(tree, state, prealloc, end + 1); err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST); BUG_ON(err == -EEXIST);
err = set_state_bits(tree, prealloc, &bits); set_state_bits(tree, prealloc, &bits);
if (err) {
prealloc = NULL;
goto out;
}
cache_state(prealloc, cached_state); cache_state(prealloc, cached_state);
merge_state(tree, prealloc); merge_state(tree, prealloc);
prealloc = NULL; prealloc = NULL;
...@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) ...@@ -1059,46 +1031,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
return 0; return 0;
} }
/*
* find the first offset in the io tree with 'bits' set. zero is
* returned if we find something, and *start_ret and *end_ret are
* set to reflect the state struct that was found.
*
* If nothing was found, 1 is returned, < 0 on error
*/
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits)
{
struct rb_node *node;
struct extent_state *state;
int ret = 1;
spin_lock(&tree->lock);
/*
* this search will find all the extents that end after
* our range starts.
*/
node = tree_search(tree, start);
if (!node)
goto out;
while (1) {
state = rb_entry(node, struct extent_state, rb_node);
if (state->end >= start && (state->state & bits)) {
*start_ret = state->start;
*end_ret = state->end;
ret = 0;
break;
}
node = rb_next(node);
if (!node)
break;
}
out:
spin_unlock(&tree->lock);
return ret;
}
/* find the first state struct with 'bits' set after 'start', and /* find the first state struct with 'bits' set after 'start', and
* return it. tree->lock must be held. NULL will returned if * return it. tree->lock must be held. NULL will returned if
* nothing was found after 'start' * nothing was found after 'start'
...@@ -1130,6 +1062,30 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, ...@@ -1130,6 +1062,30 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
return NULL; return NULL;
} }
/*
* find the first offset in the io tree with 'bits' set. zero is
* returned if we find something, and *start_ret and *end_ret are
* set to reflect the state struct that was found.
*
* If nothing was found, 1 is returned, < 0 on error
*/
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits)
{
struct extent_state *state;
int ret = 1;
spin_lock(&tree->lock);
state = find_first_extent_bit_state(tree, start, bits);
if (state) {
*start_ret = state->start;
*end_ret = state->end;
ret = 0;
}
spin_unlock(&tree->lock);
return ret;
}
/* /*
* find a contiguous range of bytes in the file marked as delalloc, not * find a contiguous range of bytes in the file marked as delalloc, not
* more than 'max_bytes'. start and end are used to return the range, * more than 'max_bytes'. start and end are used to return the range,
...@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, ...@@ -2546,7 +2502,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
struct writeback_control *wbc) struct writeback_control *wbc)
{ {
int ret; int ret;
struct address_space *mapping = page->mapping;
struct extent_page_data epd = { struct extent_page_data epd = {
.bio = NULL, .bio = NULL,
.tree = tree, .tree = tree,
...@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, ...@@ -2554,17 +2509,9 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.extent_locked = 0, .extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL, .sync_io = wbc->sync_mode == WB_SYNC_ALL,
}; };
struct writeback_control wbc_writepages = {
.sync_mode = wbc->sync_mode,
.nr_to_write = 64,
.range_start = page_offset(page) + PAGE_CACHE_SIZE,
.range_end = (loff_t)-1,
};
ret = __extent_writepage(page, wbc, &epd); ret = __extent_writepage(page, wbc, &epd);
extent_write_cache_pages(tree, mapping, &wbc_writepages,
__extent_writepage, &epd, flush_write_bio);
flush_epd_write_bio(&epd); flush_epd_write_bio(&epd);
return ret; return ret;
} }
......
...@@ -76,14 +76,14 @@ struct extent_io_ops { ...@@ -76,14 +76,14 @@ struct extent_io_ops {
struct extent_state *state); struct extent_state *state);
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate); struct extent_state *state, int uptodate);
int (*set_bit_hook)(struct inode *inode, struct extent_state *state, void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
int *bits); int *bits);
int (*clear_bit_hook)(struct inode *inode, struct extent_state *state, void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
int *bits); int *bits);
int (*merge_extent_hook)(struct inode *inode, void (*merge_extent_hook)(struct inode *inode,
struct extent_state *new, struct extent_state *new,
struct extent_state *other); struct extent_state *other);
int (*split_extent_hook)(struct inode *inode, void (*split_extent_hook)(struct inode *inode,
struct extent_state *orig, u64 split); struct extent_state *orig, u64 split);
int (*write_cache_pages_lock_hook)(struct page *page); int (*write_cache_pages_lock_hook)(struct page *page);
}; };
...@@ -108,8 +108,6 @@ struct extent_state { ...@@ -108,8 +108,6 @@ struct extent_state {
wait_queue_head_t wq; wait_queue_head_t wq;
atomic_t refs; atomic_t refs;
unsigned long state; unsigned long state;
u64 split_start;
u64 split_end;
/* for use by the FS */ /* for use by the FS */
u64 private; u64 private;
......
...@@ -183,22 +183,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next) ...@@ -183,22 +183,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
return 0; return 0;
} }
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len) static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{ {
int ret = 0;
struct extent_map *merge = NULL; struct extent_map *merge = NULL;
struct rb_node *rb; struct rb_node *rb;
struct extent_map *em;
write_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
WARN_ON(!em || em->start != start);
if (!em)
goto out;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
if (em->start != 0) { if (em->start != 0) {
rb = rb_prev(&em->rb_node); rb = rb_prev(&em->rb_node);
...@@ -225,6 +213,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len) ...@@ -225,6 +213,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
merge->in_tree = 0; merge->in_tree = 0;
free_extent_map(merge); free_extent_map(merge);
} }
}
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
int ret = 0;
struct extent_map *em;
write_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
WARN_ON(!em || em->start != start);
if (!em)
goto out;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
try_merge_map(tree, em);
free_extent_map(em); free_extent_map(em);
out: out:
...@@ -247,7 +253,6 @@ int add_extent_mapping(struct extent_map_tree *tree, ...@@ -247,7 +253,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em) struct extent_map *em)
{ {
int ret = 0; int ret = 0;
struct extent_map *merge = NULL;
struct rb_node *rb; struct rb_node *rb;
struct extent_map *exist; struct extent_map *exist;
...@@ -263,30 +268,8 @@ int add_extent_mapping(struct extent_map_tree *tree, ...@@ -263,30 +268,8 @@ int add_extent_mapping(struct extent_map_tree *tree,
goto out; goto out;
} }
atomic_inc(&em->refs); atomic_inc(&em->refs);
if (em->start != 0) {
rb = rb_prev(&em->rb_node); try_merge_map(tree, em);
if (rb)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(merge, em)) {
em->start = merge->start;
em->len += merge->len;
em->block_len += merge->block_len;
em->block_start = merge->block_start;
merge->in_tree = 0;
rb_erase(&merge->rb_node, &tree->map);
free_extent_map(merge);
}
}
rb = rb_next(&em->rb_node);
if (rb)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(em, merge)) {
em->len += merge->len;
em->block_len += merge->len;
rb_erase(&merge->rb_node, &tree->map);
merge->in_tree = 0;
free_extent_map(merge);
}
out: out:
return ret; return ret;
} }
...@@ -299,19 +282,8 @@ static u64 range_end(u64 start, u64 len) ...@@ -299,19 +282,8 @@ static u64 range_end(u64 start, u64 len)
return start + len; return start + len;
} }
/** struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
* lookup_extent_mapping - lookup extent_map u64 start, u64 len, int strict)
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
*
* Find and return the first extent_map struct in @tree that intersects the
* [start, len] range. There may be additional objects in the tree that
* intersect, so check the object returned carefully to make sure that no
* additional lookups are needed.
*/
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{ {
struct extent_map *em; struct extent_map *em;
struct rb_node *rb_node; struct rb_node *rb_node;
...@@ -320,37 +292,41 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, ...@@ -320,37 +292,41 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 end = range_end(start, len); u64 end = range_end(start, len);
rb_node = __tree_search(&tree->map, start, &prev, &next); rb_node = __tree_search(&tree->map, start, &prev, &next);
if (!rb_node && prev) {
em = rb_entry(prev, struct extent_map, rb_node);
if (end > em->start && start < extent_map_end(em))
goto found;
}
if (!rb_node && next) {
em = rb_entry(next, struct extent_map, rb_node);
if (end > em->start && start < extent_map_end(em))
goto found;
}
if (!rb_node) { if (!rb_node) {
em = NULL; if (prev)
goto out; rb_node = prev;
} else if (next)
if (IS_ERR(rb_node)) { rb_node = next;
em = ERR_CAST(rb_node); else
goto out; return NULL;
} }
em = rb_entry(rb_node, struct extent_map, rb_node); em = rb_entry(rb_node, struct extent_map, rb_node);
if (end > em->start && start < extent_map_end(em))
goto found;
em = NULL; if (strict && !(end > em->start && start < extent_map_end(em)))
goto out; return NULL;
found:
atomic_inc(&em->refs); atomic_inc(&em->refs);
out:
return em; return em;
} }
/**
* lookup_extent_mapping - lookup extent_map
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
*
* Find and return the first extent_map struct in @tree that intersects the
* [start, len] range. There may be additional objects in the tree that
* intersect, so check the object returned carefully to make sure that no
* additional lookups are needed.
*/
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{
return __lookup_extent_mapping(tree, start, len, 1);
}
/** /**
* search_extent_mapping - find a nearby extent map * search_extent_mapping - find a nearby extent map
* @tree: tree to lookup in * @tree: tree to lookup in
...@@ -365,38 +341,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, ...@@ -365,38 +341,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
struct extent_map *search_extent_mapping(struct extent_map_tree *tree, struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len) u64 start, u64 len)
{ {
struct extent_map *em; return __lookup_extent_mapping(tree, start, len, 0);
struct rb_node *rb_node;
struct rb_node *prev = NULL;
struct rb_node *next = NULL;
rb_node = __tree_search(&tree->map, start, &prev, &next);
if (!rb_node && prev) {
em = rb_entry(prev, struct extent_map, rb_node);
goto found;
}
if (!rb_node && next) {
em = rb_entry(next, struct extent_map, rb_node);
goto found;
}
if (!rb_node) {
em = NULL;
goto out;
}
if (IS_ERR(rb_node)) {
em = ERR_CAST(rb_node);
goto out;
}
em = rb_entry(rb_node, struct extent_map, rb_node);
goto found;
em = NULL;
goto out;
found:
atomic_inc(&em->refs);
out:
return em;
} }
/** /**
......
...@@ -291,7 +291,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, ...@@ -291,7 +291,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
if (search_commit) { if (search_commit) {
path->skip_locking = 1; path->skip_locking = 1;
...@@ -677,7 +678,9 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, ...@@ -677,7 +678,9 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
btrfs_super_csum_size(&root->fs_info->super_copy); btrfs_super_csum_size(&root->fs_info->super_copy);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
sector_sum = sums->sums; sector_sum = sums->sums;
again: again:
next_offset = (u64)-1; next_offset = (u64)-1;
......
...@@ -74,7 +74,7 @@ struct inode_defrag { ...@@ -74,7 +74,7 @@ struct inode_defrag {
* If an existing record is found the defrag item you * If an existing record is found the defrag item you
* pass in is freed * pass in is freed
*/ */
static int __btrfs_add_inode_defrag(struct inode *inode, static void __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag) struct inode_defrag *defrag)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
...@@ -106,11 +106,11 @@ static int __btrfs_add_inode_defrag(struct inode *inode, ...@@ -106,11 +106,11 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
BTRFS_I(inode)->in_defrag = 1; BTRFS_I(inode)->in_defrag = 1;
rb_link_node(&defrag->rb_node, parent, p); rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
return 0; return;
exists: exists:
kfree(defrag); kfree(defrag);
return 0; return;
} }
...@@ -123,7 +123,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, ...@@ -123,7 +123,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag; struct inode_defrag *defrag;
int ret = 0;
u64 transid; u64 transid;
if (!btrfs_test_opt(root, AUTO_DEFRAG)) if (!btrfs_test_opt(root, AUTO_DEFRAG))
...@@ -150,9 +149,9 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, ...@@ -150,9 +149,9 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->defrag_inodes_lock); spin_lock(&root->fs_info->defrag_inodes_lock);
if (!BTRFS_I(inode)->in_defrag) if (!BTRFS_I(inode)->in_defrag)
ret = __btrfs_add_inode_defrag(inode, defrag); __btrfs_add_inode_defrag(inode, defrag);
spin_unlock(&root->fs_info->defrag_inodes_lock); spin_unlock(&root->fs_info->defrag_inodes_lock);
return ret; return 0;
} }
/* /*
...@@ -855,7 +854,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, ...@@ -855,7 +854,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
btrfs_drop_extent_cache(inode, start, end - 1, 0); btrfs_drop_extent_cache(inode, start, end - 1, 0);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
again: again:
recow = 0; recow = 0;
split = start; split = start;
...@@ -1059,7 +1059,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos) ...@@ -1059,7 +1059,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos)
static noinline int prepare_pages(struct btrfs_root *root, struct file *file, static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
struct page **pages, size_t num_pages, struct page **pages, size_t num_pages,
loff_t pos, unsigned long first_index, loff_t pos, unsigned long first_index,
unsigned long last_index, size_t write_bytes) size_t write_bytes)
{ {
struct extent_state *cached_state = NULL; struct extent_state *cached_state = NULL;
int i; int i;
...@@ -1159,7 +1159,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, ...@@ -1159,7 +1159,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL; struct page **pages = NULL;
unsigned long first_index; unsigned long first_index;
unsigned long last_index;
size_t num_written = 0; size_t num_written = 0;
int nrptrs; int nrptrs;
int ret = 0; int ret = 0;
...@@ -1172,7 +1171,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, ...@@ -1172,7 +1171,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
return -ENOMEM; return -ENOMEM;
first_index = pos >> PAGE_CACHE_SHIFT; first_index = pos >> PAGE_CACHE_SHIFT;
last_index = (pos + iov_iter_count(i)) >> PAGE_CACHE_SHIFT;
while (iov_iter_count(i) > 0) { while (iov_iter_count(i) > 0) {
size_t offset = pos & (PAGE_CACHE_SIZE - 1); size_t offset = pos & (PAGE_CACHE_SIZE - 1);
...@@ -1206,8 +1204,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, ...@@ -1206,8 +1204,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
* contents of pages from loop to loop * contents of pages from loop to loop
*/ */
ret = prepare_pages(root, file, pages, num_pages, ret = prepare_pages(root, file, pages, num_pages,
pos, first_index, last_index, pos, first_index, write_bytes);
write_bytes);
if (ret) { if (ret) {
btrfs_delalloc_release_space(inode, btrfs_delalloc_release_space(inode,
num_pages << PAGE_CACHE_SHIFT); num_pages << PAGE_CACHE_SHIFT);
......
...@@ -1061,7 +1061,8 @@ static noinline int run_delalloc_nocow(struct inode *inode, ...@@ -1061,7 +1061,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
u64 ino = btrfs_ino(inode); u64 ino = btrfs_ino(inode);
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
nolock = btrfs_is_free_space_inode(root, inode); nolock = btrfs_is_free_space_inode(root, inode);
...@@ -1282,17 +1283,16 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, ...@@ -1282,17 +1283,16 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
return ret; return ret;
} }
static int btrfs_split_extent_hook(struct inode *inode, static void btrfs_split_extent_hook(struct inode *inode,
struct extent_state *orig, u64 split) struct extent_state *orig, u64 split)
{ {
/* not delalloc, ignore it */ /* not delalloc, ignore it */
if (!(orig->state & EXTENT_DELALLOC)) if (!(orig->state & EXTENT_DELALLOC))
return 0; return;
spin_lock(&BTRFS_I(inode)->lock); spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++; BTRFS_I(inode)->outstanding_extents++;
spin_unlock(&BTRFS_I(inode)->lock); spin_unlock(&BTRFS_I(inode)->lock);
return 0;
} }
/* /*
...@@ -1301,18 +1301,17 @@ static int btrfs_split_extent_hook(struct inode *inode, ...@@ -1301,18 +1301,17 @@ static int btrfs_split_extent_hook(struct inode *inode,
* extents, such as when we are doing sequential writes, so we can properly * extents, such as when we are doing sequential writes, so we can properly
* account for the metadata space we'll need. * account for the metadata space we'll need.
*/ */
static int btrfs_merge_extent_hook(struct inode *inode, static void btrfs_merge_extent_hook(struct inode *inode,
struct extent_state *new, struct extent_state *new,
struct extent_state *other) struct extent_state *other)
{ {
/* not delalloc, ignore it */ /* not delalloc, ignore it */
if (!(other->state & EXTENT_DELALLOC)) if (!(other->state & EXTENT_DELALLOC))
return 0; return;
spin_lock(&BTRFS_I(inode)->lock); spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents--; BTRFS_I(inode)->outstanding_extents--;
spin_unlock(&BTRFS_I(inode)->lock); spin_unlock(&BTRFS_I(inode)->lock);
return 0;
} }
/* /*
...@@ -1320,7 +1319,7 @@ static int btrfs_merge_extent_hook(struct inode *inode, ...@@ -1320,7 +1319,7 @@ static int btrfs_merge_extent_hook(struct inode *inode,
* bytes in this file, and to maintain the list of inodes that * bytes in this file, and to maintain the list of inodes that
* have pending delalloc work to be done. * have pending delalloc work to be done.
*/ */
static int btrfs_set_bit_hook(struct inode *inode, static void btrfs_set_bit_hook(struct inode *inode,
struct extent_state *state, int *bits) struct extent_state *state, int *bits)
{ {
...@@ -1351,13 +1350,12 @@ static int btrfs_set_bit_hook(struct inode *inode, ...@@ -1351,13 +1350,12 @@ static int btrfs_set_bit_hook(struct inode *inode,
} }
spin_unlock(&root->fs_info->delalloc_lock); spin_unlock(&root->fs_info->delalloc_lock);
} }
return 0;
} }
/* /*
* extent_io.c clear_bit_hook, see set_bit_hook for why * extent_io.c clear_bit_hook, see set_bit_hook for why
*/ */
static int btrfs_clear_bit_hook(struct inode *inode, static void btrfs_clear_bit_hook(struct inode *inode,
struct extent_state *state, int *bits) struct extent_state *state, int *bits)
{ {
/* /*
...@@ -1395,7 +1393,6 @@ static int btrfs_clear_bit_hook(struct inode *inode, ...@@ -1395,7 +1393,6 @@ static int btrfs_clear_bit_hook(struct inode *inode,
} }
spin_unlock(&root->fs_info->delalloc_lock); spin_unlock(&root->fs_info->delalloc_lock);
} }
return 0;
} }
/* /*
...@@ -1645,7 +1642,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, ...@@ -1645,7 +1642,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
int ret; int ret;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
path->leave_spinning = 1; path->leave_spinning = 1;
...@@ -2215,7 +2213,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) ...@@ -2215,7 +2213,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (!root->orphan_block_rsv) { if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root); block_rsv = btrfs_alloc_block_rsv(root);
BUG_ON(!block_rsv); if (!block_rsv)
return -ENOMEM;
} }
spin_lock(&root->orphan_lock); spin_lock(&root->orphan_lock);
...@@ -2517,7 +2516,9 @@ static void btrfs_read_locked_inode(struct inode *inode) ...@@ -2517,7 +2516,9 @@ static void btrfs_read_locked_inode(struct inode *inode)
filled = true; filled = true;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
goto make_bad;
path->leave_spinning = 1; path->leave_spinning = 1;
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
...@@ -2998,13 +2999,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) ...@@ -2998,13 +2999,16 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
dentry->d_name.name, dentry->d_name.len); dentry->d_name.name, dentry->d_name.len);
BUG_ON(ret); if (ret)
goto out;
if (inode->i_nlink == 0) { if (inode->i_nlink == 0) {
ret = btrfs_orphan_add(trans, inode); ret = btrfs_orphan_add(trans, inode);
BUG_ON(ret); if (ret)
goto out;
} }
out:
nr = trans->blocks_used; nr = trans->blocks_used;
__unlink_end_trans(trans, root); __unlink_end_trans(trans, root);
btrfs_btree_balance_dirty(root, nr); btrfs_btree_balance_dirty(root, nr);
...@@ -3147,6 +3151,11 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -3147,6 +3151,11 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->reada = -1;
if (root->ref_cows || root == root->fs_info->tree_root) if (root->ref_cows || root == root->fs_info->tree_root)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
...@@ -3159,10 +3168,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -3159,10 +3168,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (min_type == 0 && root == BTRFS_I(inode)->root) if (min_type == 0 && root == BTRFS_I(inode)->root)
btrfs_kill_delayed_inode_items(inode); btrfs_kill_delayed_inode_items(inode);
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
key.objectid = ino; key.objectid = ino;
key.offset = (u64)-1; key.offset = (u64)-1;
key.type = (u8)-1; key.type = (u8)-1;
...@@ -3690,7 +3695,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, ...@@ -3690,7 +3695,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
int ret = 0; int ret = 0;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
namelen, 0); namelen, 0);
...@@ -3946,6 +3952,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, ...@@ -3946,6 +3952,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new) struct btrfs_root *root, int *new)
{ {
struct inode *inode; struct inode *inode;
int bad_inode = 0;
inode = btrfs_iget_locked(s, location->objectid, root); inode = btrfs_iget_locked(s, location->objectid, root);
if (!inode) if (!inode)
...@@ -3955,10 +3962,19 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, ...@@ -3955,10 +3962,19 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
BTRFS_I(inode)->root = root; BTRFS_I(inode)->root = root;
memcpy(&BTRFS_I(inode)->location, location, sizeof(*location)); memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
btrfs_read_locked_inode(inode); btrfs_read_locked_inode(inode);
if (!is_bad_inode(inode)) {
inode_tree_add(inode); inode_tree_add(inode);
unlock_new_inode(inode); unlock_new_inode(inode);
if (new) if (new)
*new = 1; *new = 1;
} else {
bad_inode = 1;
}
}
if (bad_inode) {
iput(inode);
inode = ERR_PTR(-ESTALE);
} }
return inode; return inode;
...@@ -4451,7 +4467,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, ...@@ -4451,7 +4467,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
int owner; int owner;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return ERR_PTR(-ENOMEM);
inode = new_inode(root->fs_info->sb); inode = new_inode(root->fs_info->sb);
if (!inode) { if (!inode) {
...@@ -6711,19 +6728,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, ...@@ -6711,19 +6728,6 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
return 0; return 0;
} }
/* helper function for file defrag and space balancing. This
* forces readahead on a given range of bytes in an inode
*/
unsigned long btrfs_force_ra(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
pgoff_t offset, pgoff_t last_index)
{
pgoff_t req_size = last_index - offset + 1;
page_cache_sync_readahead(mapping, ra, file, offset, req_size);
return offset + req_size;
}
struct inode *btrfs_alloc_inode(struct super_block *sb) struct inode *btrfs_alloc_inode(struct super_block *sb)
{ {
struct btrfs_inode *ei; struct btrfs_inode *ei;
...@@ -7206,7 +7210,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, ...@@ -7206,7 +7210,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock; goto out_unlock;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path) {
err = -ENOMEM;
drop_inode = 1;
goto out_unlock;
}
key.objectid = btrfs_ino(inode); key.objectid = btrfs_ino(inode);
key.offset = 0; key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
......
...@@ -1749,11 +1749,10 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, ...@@ -1749,11 +1749,10 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
key.objectid = key.offset; key.objectid = key.offset;
key.offset = (u64)-1; key.offset = (u64)-1;
dirid = key.objectid; dirid = key.objectid;
} }
if (ptr < name) if (ptr < name)
goto out; goto out;
memcpy(name, ptr, total_len); memmove(name, ptr, total_len);
name[total_len]='\0'; name[total_len]='\0';
ret = 0; ret = 0;
out: out:
......
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "ref-cache.h"
#include "transaction.h"
/*
 * Insert @node into the rb-tree at @root, keyed by @bytenr.
 *
 * Walks down from the root comparing each existing btrfs_leaf_ref's
 * bytenr against @bytenr.  If a ref with the same bytenr is already
 * present, nothing is inserted and that existing rb_node is returned
 * so the caller can deal with the duplicate; otherwise @node is
 * linked, recolored, and NULL is returned.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_leaf_ref *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	/*
	 * Fix: dropped the dead store "entry = rb_entry(node, ...)" that
	 * was here — the value was never read before the function returned.
	 */
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * Look up the btrfs_leaf_ref whose bytenr equals @bytenr.
 *
 * Standard rb-tree descent; returns the matching rb_node, or NULL
 * when no ref with that bytenr is in the tree.  Warns if a visited
 * entry claims not to be in a tree (in_tree == 0), which would mean
 * the cache bookkeeping is inconsistent.
 */
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *cur;
	struct btrfs_leaf_ref *ref;

	for (cur = root->rb_node; cur; ) {
		ref = rb_entry(cur, struct btrfs_leaf_ref, rb_node);
		WARN_ON(!ref->in_tree);

		if (bytenr < ref->bytenr)
			cur = cur->rb_left;
		else if (bytenr > ref->bytenr)
			cur = cur->rb_right;
		else
			return cur;
	}
	return NULL;
}
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef __REFCACHE__
#define __REFCACHE__
/*
 * One file-extent record carried inside a cached leaf reference:
 * locates the extent in the extent allocation tree and identifies
 * the file back reference that points at it.
 */
struct btrfs_extent_info {
	/* bytenr and num_bytes find the extent in the extent allocation tree */
	u64 bytenr;
	u64 num_bytes;

	/* objectid and offset find the back reference for the file */
	u64 objectid;
	u64 offset;
};
/*
 * A cached reference to one btree leaf.  Lives in a per-tree rb-tree
 * (keyed by bytenr — see tree_insert/tree_search) and on a list, and
 * carries a variable number of extent records in the trailing
 * flexible array (allocation size via btrfs_leaf_ref_size()).
 */
struct btrfs_leaf_ref {
	struct rb_node rb_node;		/* linkage in the bytenr-keyed rb-tree */
	struct btrfs_leaf_ref_tree *tree;	/* owning tree, if any */
	int in_tree;			/* nonzero while linked into ->tree */
	atomic_t usage;			/* reference count */

	u64 root_gen;			/* generation of the root this came from */
	u64 bytenr;			/* disk byte offset of the leaf; rb-tree key */
	u64 owner;			/* assumed: owning tree/root id — TODO confirm */
	u64 generation;			/* assumed: leaf's generation — TODO confirm */
	int nritems;			/* number of entries in extents[] */

	struct list_head list;		/* linkage on a ref list */
	struct btrfs_extent_info extents[];	/* flexible array of extent records */
};
/*
 * Total allocation size, in bytes, of a btrfs_leaf_ref whose trailing
 * flexible extents[] array holds @nr_extents entries: the fixed header
 * plus the array payload.
 */
static inline size_t btrfs_leaf_ref_size(int nr_extents)
{
	size_t array_bytes = nr_extents * sizeof(struct btrfs_extent_info);

	return sizeof(struct btrfs_leaf_ref) + array_bytes;
}
#endif
...@@ -71,13 +71,12 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, ...@@ -71,13 +71,12 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
return ret; return ret;
} }
int btrfs_set_root_node(struct btrfs_root_item *item, void btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node) struct extent_buffer *node)
{ {
btrfs_set_root_bytenr(item, node->start); btrfs_set_root_bytenr(item, node->start);
btrfs_set_root_level(item, btrfs_header_level(node)); btrfs_set_root_level(item, btrfs_header_level(node));
btrfs_set_root_generation(item, btrfs_header_generation(node)); btrfs_set_root_generation(item, btrfs_header_generation(node));
return 0;
} }
/* /*
......
...@@ -216,17 +216,11 @@ static void wait_current_trans(struct btrfs_root *root) ...@@ -216,17 +216,11 @@ static void wait_current_trans(struct btrfs_root *root)
spin_lock(&root->fs_info->trans_lock); spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction; cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) { if (cur_trans && cur_trans->blocked) {
DEFINE_WAIT(wait);
atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->use_count);
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&root->fs_info->trans_lock);
while (1) {
prepare_to_wait(&root->fs_info->transaction_wait, &wait, wait_event(root->fs_info->transaction_wait,
TASK_UNINTERRUPTIBLE); !cur_trans->blocked);
if (!cur_trans->blocked)
break;
schedule();
}
finish_wait(&root->fs_info->transaction_wait, &wait);
put_transaction(cur_trans); put_transaction(cur_trans);
} else { } else {
spin_unlock(&root->fs_info->trans_lock); spin_unlock(&root->fs_info->trans_lock);
...@@ -357,19 +351,10 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root ...@@ -357,19 +351,10 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
} }
/* wait for a transaction commit to be fully complete */ /* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root, static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit) struct btrfs_transaction *commit)
{ {
DEFINE_WAIT(wait); wait_event(commit->commit_wait, commit->commit_done);
while (!commit->commit_done) {
prepare_to_wait(&commit->commit_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (commit->commit_done)
break;
schedule();
}
finish_wait(&commit->commit_wait, &wait);
return 0;
} }
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
...@@ -1085,22 +1070,7 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info) ...@@ -1085,22 +1070,7 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
static void wait_current_trans_commit_start(struct btrfs_root *root, static void wait_current_trans_commit_start(struct btrfs_root *root,
struct btrfs_transaction *trans) struct btrfs_transaction *trans)
{ {
DEFINE_WAIT(wait); wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
if (trans->in_commit)
return;
while (1) {
prepare_to_wait(&root->fs_info->transaction_blocked_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (trans->in_commit) {
finish_wait(&root->fs_info->transaction_blocked_wait,
&wait);
break;
}
schedule();
finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
}
} }
/* /*
...@@ -1110,24 +1080,8 @@ static void wait_current_trans_commit_start(struct btrfs_root *root, ...@@ -1110,24 +1080,8 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root, static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_transaction *trans) struct btrfs_transaction *trans)
{ {
DEFINE_WAIT(wait); wait_event(root->fs_info->transaction_wait,
trans->commit_done || (trans->in_commit && !trans->blocked));
if (trans->commit_done || (trans->in_commit && !trans->blocked))
return;
while (1) {
prepare_to_wait(&root->fs_info->transaction_wait, &wait,
TASK_UNINTERRUPTIBLE);
if (trans->commit_done ||
(trans->in_commit && !trans->blocked)) {
finish_wait(&root->fs_info->transaction_wait,
&wait);
break;
}
schedule();
finish_wait(&root->fs_info->transaction_wait,
&wait);
}
} }
/* /*
...@@ -1234,8 +1188,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ...@@ -1234,8 +1188,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->use_count);
btrfs_end_transaction(trans, root); btrfs_end_transaction(trans, root);
ret = wait_for_commit(root, cur_trans); wait_for_commit(root, cur_trans);
BUG_ON(ret);
put_transaction(cur_trans); put_transaction(cur_trans);
......
...@@ -1617,7 +1617,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, ...@@ -1617,7 +1617,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
return 0; return 0;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
nritems = btrfs_header_nritems(eb); nritems = btrfs_header_nritems(eb);
for (i = 0; i < nritems; i++) { for (i = 0; i < nritems; i++) {
...@@ -1723,7 +1724,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, ...@@ -1723,7 +1724,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
return -ENOMEM; return -ENOMEM;
if (*level == 1) { if (*level == 1) {
wc->process_func(root, next, wc, ptr_gen); ret = wc->process_func(root, next, wc, ptr_gen);
if (ret)
return ret;
path->slots[*level]++; path->slots[*level]++;
if (wc->free) { if (wc->free) {
...@@ -1788,8 +1791,11 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, ...@@ -1788,8 +1791,11 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
parent = path->nodes[*level + 1]; parent = path->nodes[*level + 1];
root_owner = btrfs_header_owner(parent); root_owner = btrfs_header_owner(parent);
wc->process_func(root, path->nodes[*level], wc, ret = wc->process_func(root, path->nodes[*level], wc,
btrfs_header_generation(path->nodes[*level])); btrfs_header_generation(path->nodes[*level]));
if (ret)
return ret;
if (wc->free) { if (wc->free) {
struct extent_buffer *next; struct extent_buffer *next;
......
...@@ -1037,7 +1037,8 @@ static noinline int find_next_chunk(struct btrfs_root *root, ...@@ -1037,7 +1037,8 @@ static noinline int find_next_chunk(struct btrfs_root *root,
struct btrfs_key found_key; struct btrfs_key found_key;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path)
return -ENOMEM;
key.objectid = objectid; key.objectid = objectid;
key.offset = (u64)-1; key.offset = (u64)-1;
...@@ -2061,8 +2062,10 @@ int btrfs_balance(struct btrfs_root *dev_root) ...@@ -2061,8 +2062,10 @@ int btrfs_balance(struct btrfs_root *dev_root)
/* step two, relocate all the chunks */ /* step two, relocate all the chunks */
path = btrfs_alloc_path(); path = btrfs_alloc_path();
BUG_ON(!path); if (!path) {
ret = -ENOMEM;
goto error;
}
key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
key.offset = (u64)-1; key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY; key.type = BTRFS_CHUNK_ITEM_KEY;
...@@ -2661,7 +2664,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans, ...@@ -2661,7 +2664,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
ret = find_next_chunk(fs_info->chunk_root, ret = find_next_chunk(fs_info->chunk_root,
BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset); BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
BUG_ON(ret); if (ret)
return ret;
alloc_profile = BTRFS_BLOCK_GROUP_METADATA | alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
(fs_info->metadata_alloc_profile & (fs_info->metadata_alloc_profile &
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment