Commit 441f4058 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (30 commits)
  Btrfs: fix the inode ref searches done by btrfs_search_path_in_tree
  Btrfs: allow treeid==0 in the inode lookup ioctl
  Btrfs: return keys for large items to the search ioctl
  Btrfs: fix key checks and advance in the search ioctl
  Btrfs: buffer results in the space_info ioctl
  Btrfs: use __u64 types in ioctl.h
  Btrfs: fix search_ioctl key advance
  Btrfs: fix gfp flags masking in the compression code
  Btrfs: don't look at bio flags after submit_bio
  btrfs: using btrfs_stack_device_id() get devid
  btrfs: use memparse
  Btrfs: add a "df" ioctl for btrfs
  Btrfs: cache the extent state everywhere we possibly can V2
  Btrfs: cache ordered extent when completing io
  Btrfs: cache extent state in find_delalloc_range
  Btrfs: change the ordered tree to use a spinlock instead of a mutex
  Btrfs: finish read pages in the order they are submitted
  btrfs: fix btrfs_mkdir goto for no free objectids
  Btrfs: flush data on snapshot creation
  Btrfs: make df be a little bit more understandable
  ...
parents 7c34691a 8ad6fcab
@@ -153,6 +153,11 @@ struct btrfs_inode {
 	unsigned ordered_data_close:1;
 	unsigned dummy_inode:1;
+	/*
+	 * always compress this one file
+	 */
+	unsigned force_compress:1;
 	struct inode vfs_inode;
 };
...
@@ -478,7 +478,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			goto next;
 		}
-		page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
+		page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
 		if (!page)
 			break;
...
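The hunk above replaces "| GFP_NOFS" with "& ~__GFP_FS": OR-ing GFP_NOFS into the mapping's mask can only add bits, so the __GFP_FS bit already present in mapping_gfp_mask() stayed set and the allocation could still recurse back into the filesystem. Clearing __GFP_FS is what actually forbids that recursion. A minimal userspace sketch of the flag arithmetic; the numeric values below are illustrative assumptions, not the kernel's real definitions:

#include <stdio.h>

/* Illustrative flag values only; the real kernel constants differ. */
#define MY_GFP_WAIT   0x01u
#define MY_GFP_IO     0x02u
#define MY_GFP_FS     0x04u
#define MY_GFP_NOFS   (MY_GFP_WAIT | MY_GFP_IO)                /* no FS bit */
#define MY_GFP_KERNEL (MY_GFP_WAIT | MY_GFP_IO | MY_GFP_FS)

int main(void)
{
	unsigned int mapping_mask = MY_GFP_KERNEL;        /* typical mapping mask */
	unsigned int or_nofs  = mapping_mask | MY_GFP_NOFS;   /* FS bit still set */
	unsigned int clear_fs = mapping_mask & ~MY_GFP_FS;    /* FS bit cleared   */

	printf("OR GFP_NOFS keeps __GFP_FS: %d\n", !!(or_nofs & MY_GFP_FS));   /* 1 */
	printf("& ~__GFP_FS clears __GFP_FS: %d\n", !!(clear_fs & MY_GFP_FS)); /* 0 */
	return 0;
}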
@@ -373,11 +373,13 @@ struct btrfs_super_block {
  * ones specified below then we will fail to mount
  */
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
+#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(2ULL << 0)
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
 #define BTRFS_FEATURE_INCOMPAT_SUPP		\
-	BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF
+	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |	\
+	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)
 /*
  * A leaf is full of items. offset and size tell us where to find
@@ -1182,7 +1184,6 @@ struct btrfs_root {
 #define BTRFS_INODE_NOATIME		(1 << 9)
 #define BTRFS_INODE_DIRSYNC		(1 << 10)
-
 /* some macros to generate set/get funcs for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
@@ -1842,7 +1843,7 @@ BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
			 compat_flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
-			 compat_flags, 64);
+			 compat_ro_flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
			 incompat_flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
@@ -2310,7 +2311,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       u32 min_type);
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
+int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+			      struct extent_state **cached_state);
 int btrfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
@@ -2335,7 +2337,7 @@ int btrfs_init_cachep(void);
 void btrfs_destroy_cachep(void);
 long btrfs_ioctl_trans_end(struct file *file);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-			 struct btrfs_root *root);
+			 struct btrfs_root *root, int *was_new);
 int btrfs_commit_write(struct file *file, struct page *page,
		       unsigned from, unsigned to);
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
@@ -2386,7 +2388,6 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 /* super.c */
-u64 btrfs_parse_size(char *str);
 int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
...
@@ -263,13 +263,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 static int verify_parent_transid(struct extent_io_tree *io_tree,
				 struct extent_buffer *eb, u64 parent_transid)
 {
+	struct extent_state *cached_state = NULL;
 	int ret;
 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 0;
-	lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
-	if (extent_buffer_uptodate(io_tree, eb) &&
+	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
+			 0, &cached_state, GFP_NOFS);
+	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
	    btrfs_header_generation(eb) == parent_transid) {
		ret = 0;
		goto out;
@@ -282,10 +284,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
	       (unsigned long long)btrfs_header_generation(eb));
	}
	ret = 1;
-	clear_extent_buffer_uptodate(io_tree, eb);
+	clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
 out:
-	unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
+			     &cached_state, GFP_NOFS);
	return ret;
 }
@@ -2497,7 +2499,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;
-	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
+	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
+				     NULL);
	if (!ret)
		return ret;
...
@@ -95,7 +95,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 	key.offset = 0;
-	inode = btrfs_iget(sb, &key, root);
+	inode = btrfs_iget(sb, &key, root, NULL);
 	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto fail;
@@ -223,7 +223,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root));
+	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
 	if (!IS_ERR(dentry))
		dentry->d_op = &btrfs_dentry_operations;
 	return dentry;
...
@@ -6561,6 +6561,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
 	struct btrfs_key key;
 	struct inode *inode = NULL;
 	struct btrfs_file_extent_item *fi;
+	struct extent_state *cached_state = NULL;
 	u64 num_bytes;
 	u64 skip_objectid = 0;
 	u32 nritems;
@@ -6589,12 +6590,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
		}
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
-		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			    key.offset + num_bytes - 1, GFP_NOFS);
+		lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
+				 key.offset + num_bytes - 1, 0, &cached_state,
+				 GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
-			      key.offset + num_bytes - 1, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
+				     key.offset + num_bytes - 1, &cached_state,
+				     GFP_NOFS);
		cond_resched();
	}
 	iput(inode);
...
@@ -513,7 +513,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_end;
 	int err;
 	int set = 0;
+	int clear = 0;
+	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
+		clear = 1;
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
@@ -524,14 +527,20 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	spin_lock(&tree->lock);
 	if (cached_state) {
		cached = *cached_state;
-		*cached_state = NULL;
-		cached_state = NULL;
+		if (clear) {
+			*cached_state = NULL;
+			cached_state = NULL;
+		}
		if (cached && cached->tree && cached->start == start) {
-			atomic_dec(&cached->refs);
+			if (clear)
+				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
-		free_extent_state(cached);
+		if (clear)
+			free_extent_state(cached);
	}
 	/*
	 * this search will find the extents that end after
@@ -946,11 +955,11 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 }
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask)
+			struct extent_state **cached_state, gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
-			      0, NULL, NULL, mask);
+			      0, NULL, cached_state, mask);
 }
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -984,10 +993,11 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 }
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-				 u64 end, gfp_t mask)
+				 u64 end, struct extent_state **cached_state,
+				 gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
-				NULL, mask);
+				cached_state, mask);
 }
 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1171,7 +1181,8 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
  * 1 is returned if we find something, 0 if nothing was in the tree
  */
 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
-					u64 *start, u64 *end, u64 max_bytes)
+					u64 *start, u64 *end, u64 max_bytes,
+					struct extent_state **cached_state)
 {
 	struct rb_node *node;
 	struct extent_state *state;
@@ -1203,8 +1214,11 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
			*end = state->end;
			goto out;
		}
-		if (!found)
+		if (!found) {
			*start = state->start;
+			*cached_state = state;
+			atomic_inc(&state->refs);
+		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
@@ -1336,10 +1350,11 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 	delalloc_start = *start;
 	delalloc_end = 0;
 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
-				    max_bytes);
+				    max_bytes, &cached_state);
 	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
+		free_extent_state(cached_state);
		return found;
	}
@@ -1722,7 +1737,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
		}
		if (!uptodate) {
-			clear_extent_uptodate(tree, start, end, GFP_NOFS);
+			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
			ClearPageUptodate(page);
			SetPageError(page);
		}
@@ -1750,7 +1765,8 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 static void end_bio_extent_readpage(struct bio *bio, int err)
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
+	struct bio_vec *bvec = bio->bi_io_vec;
 	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
@@ -1773,7 +1789,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
		else
			whole_page = 0;
-		if (--bvec >= bio->bi_io_vec)
+		if (++bvec <= bvec_end)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
@@ -1818,7 +1834,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
			}
			check_page_locked(tree, page);
		}
-	} while (bvec >= bio->bi_io_vec);
+	} while (bvec <= bvec_end);
 	bio_put(bio);
 }
@@ -2704,6 +2720,7 @@ int extent_readpages(struct extent_io_tree *tree,
 int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset)
 {
+	struct extent_state *cached_state = NULL;
 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
@@ -2712,12 +2729,12 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	if (start > end)
		return 0;
-	lock_extent(tree, start, end, GFP_NOFS);
+	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
			 EXTENT_DO_ACCOUNTING,
-			 1, 1, NULL, GFP_NOFS);
+			 1, 1, &cached_state, GFP_NOFS);
 	return 0;
 }
@@ -2920,16 +2937,17 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		get_extent_t *get_extent)
 {
 	struct inode *inode = mapping->host;
+	struct extent_state *cached_state = NULL;
 	u64 start = iblock << inode->i_blkbits;
 	sector_t sector = 0;
 	size_t blksize = (1 << inode->i_blkbits);
 	struct extent_map *em;
-	lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-		    GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+			 0, &cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
+			     start + blksize - 1, &cached_state, GFP_NOFS);
 	if (!em || IS_ERR(em))
		return 0;
@@ -2951,6 +2969,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	u32 flags = 0;
 	u64 disko = 0;
 	struct extent_map *em = NULL;
+	struct extent_state *cached_state = NULL;
 	int end = 0;
 	u64 em_start = 0, em_len = 0;
 	unsigned long emflags;
@@ -2959,8 +2978,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	if (len == 0)
		return -EINVAL;
-	lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
-		    GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+			 &cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, off, max - off, 0);
 	if (!em)
		goto out;
@@ -3023,8 +3042,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
 	free_extent_map(em);
 out:
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
-		      GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+			     &cached_state, GFP_NOFS);
 	return ret;
 }
@@ -3264,7 +3283,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 }
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				struct extent_buffer *eb)
+				struct extent_buffer *eb,
+				struct extent_state **cached_state)
 {
 	unsigned long i;
 	struct page *page;
@@ -3274,7 +3294,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      GFP_NOFS);
+			      cached_state, GFP_NOFS);
 	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (page)
@@ -3334,7 +3354,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 }
 int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb)
+			   struct extent_buffer *eb,
+			   struct extent_state *cached_state)
 {
 	int ret = 0;
 	unsigned long num_pages;
@@ -3346,7 +3367,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
		return 1;
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			     EXTENT_UPTODATE, 1, NULL);
+			     EXTENT_UPTODATE, 1, cached_state);
 	if (ret)
		return ret;
...
@@ -163,6 +163,8 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached, gfp_t mask);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
+			 struct extent_state **cached, gfp_t mask);
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
@@ -196,7 +198,7 @@ int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
				  u64 end, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask);
+			struct extent_state **cached_state, gfp_t mask);
 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
@@ -281,9 +283,11 @@ int test_extent_buffer_dirty(struct extent_io_tree *tree,
 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
			       struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				 struct extent_buffer *eb);
+				struct extent_buffer *eb,
+				struct extent_state **cached_state);
 int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb);
+			   struct extent_buffer *eb,
+			   struct extent_state *cached_state);
 int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
		      unsigned long min_len, char **token, char **map,
		      unsigned long *map_start,
...
@@ -123,7 +123,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 	end_of_last_block = start_pos + num_bytes - 1;
-	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
+	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+					NULL);
 	if (err)
		return err;
@@ -753,6 +754,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
 {
+	struct extent_state *cached_state = NULL;
 	int i;
 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
 	struct inode *inode = fdentry(file)->d_inode;
@@ -781,16 +783,18 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 	}
 	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
-		lock_extent(&BTRFS_I(inode)->io_tree,
-			    start_pos, last_pos - 1, GFP_NOFS);
+		lock_extent_bits(&BTRFS_I(inode)->io_tree,
+				 start_pos, last_pos - 1, 0, &cached_state,
+				 GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
-			unlock_extent(&BTRFS_I(inode)->io_tree,
-				      start_pos, last_pos - 1, GFP_NOFS);
+			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+					     start_pos, last_pos - 1,
+					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
@@ -802,12 +806,13 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
		if (ordered)
			btrfs_put_ordered_extent(ordered);
-		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
			  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
-			  EXTENT_DO_ACCOUNTING,
+			  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
			  GFP_NOFS);
-		unlock_extent(&BTRFS_I(inode)->io_tree,
-			      start_pos, last_pos - 1, GFP_NOFS);
+		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+				     start_pos, last_pos - 1, &cached_state,
+				     GFP_NOFS);
	}
 	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
...
This diff is collapsed.
@@ -30,12 +30,114 @@ struct btrfs_ioctl_vol_args {
 	char name[BTRFS_PATH_NAME_MAX + 1];
 };
+#define BTRFS_INO_LOOKUP_PATH_MAX 4080
+struct btrfs_ioctl_ino_lookup_args {
+	__u64 treeid;
+	__u64 objectid;
+	char name[BTRFS_INO_LOOKUP_PATH_MAX];
+};
+struct btrfs_ioctl_search_key {
+	/* which root are we searching.  0 is the tree of tree roots */
+	__u64 tree_id;
+	/* keys returned will be >= min and <= max */
+	__u64 min_objectid;
+	__u64 max_objectid;
+	/* keys returned will be >= min and <= max */
+	__u64 min_offset;
+	__u64 max_offset;
+	/* max and min transids to search for */
+	__u64 min_transid;
+	__u64 max_transid;
+	/* keys returned will be >= min and <= max */
+	__u32 min_type;
+	__u32 max_type;
+	/*
+	 * how many items did userland ask for, and how many are we
+	 * returning
+	 */
+	__u32 nr_items;
+	/* align to 64 bits */
+	__u32 unused;
+	/* some extra for later */
+	__u64 unused1;
+	__u64 unused2;
+	__u64 unused3;
+	__u64 unused4;
+};
+struct btrfs_ioctl_search_header {
+	__u64 transid;
+	__u64 objectid;
+	__u64 offset;
+	__u32 type;
+	__u32 len;
+};
+#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key))
+/*
+ * the buf is an array of search headers where
+ * each header is followed by the actual item
+ * the type field is expanded to 32 bits for alignment
+ */
+struct btrfs_ioctl_search_args {
+	struct btrfs_ioctl_search_key key;
+	char buf[BTRFS_SEARCH_ARGS_BUFSIZE];
+};
 struct btrfs_ioctl_clone_range_args {
 	__s64 src_fd;
 	__u64 src_offset, src_length;
 	__u64 dest_offset;
 };
+/* flags for the defrag range ioctl */
+#define BTRFS_DEFRAG_RANGE_COMPRESS 1
+#define BTRFS_DEFRAG_RANGE_START_IO 2
+struct btrfs_ioctl_defrag_range_args {
+	/* start of the defrag operation */
+	__u64 start;
+	/* number of bytes to defrag, use (u64)-1 to say all */
+	__u64 len;
+	/*
+	 * flags for the operation, which can include turning
+	 * on compression for this one defrag
+	 */
+	__u64 flags;
+	/*
+	 * any extent bigger than this will be considered
+	 * already defragged.  Use 0 to take the kernel default
+	 * Use 1 to say every single extent must be rewritten
+	 */
+	__u32 extent_thresh;
+	/* spare for later */
+	__u32 unused[5];
+};
+struct btrfs_ioctl_space_info {
+	__u64 flags;
+	__u64 total_bytes;
+	__u64 used_bytes;
+};
+struct btrfs_ioctl_space_args {
+	__u64 space_slots;
+	__u64 total_spaces;
+	struct btrfs_ioctl_space_info spaces[0];
+};
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -67,4 +169,13 @@ struct btrfs_ioctl_clone_range_args {
				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
				    struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \
+				    struct btrfs_ioctl_defrag_range_args)
+#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \
+				    struct btrfs_ioctl_search_args)
+#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \
+				   struct btrfs_ioctl_ino_lookup_args)
+#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64)
+#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
+				   struct btrfs_ioctl_space_args)
 #endif
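For reference, a hedged userspace sketch of driving the new BTRFS_IOC_SPACE_INFO ("df") ioctl declared above. The struct layouts and the ioctl number come from this header; BTRFS_IOCTL_MAGIC (0x94) is defined earlier in the same file, and the two-call pattern (a probe with space_slots == 0 to learn how many entries exist, then a sized call) is an assumption about the intended protocol, not something shown in this diff:

/* space_info_df.c: query btrfs space usage on a mounted filesystem. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define BTRFS_IOCTL_MAGIC 0x94	/* value from fs/btrfs/ioctl.h, not this hunk */

struct btrfs_ioctl_space_info {
	__u64 flags;
	__u64 total_bytes;
	__u64 used_bytes;
};

struct btrfs_ioctl_space_args {
	__u64 space_slots;
	__u64 total_spaces;
	struct btrfs_ioctl_space_info spaces[0];
};

#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
				   struct btrfs_ioctl_space_args)

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/mnt";	/* assumed mount point */
	struct btrfs_ioctl_space_args probe = { .space_slots = 0 };
	struct btrfs_ioctl_space_args *args;
	int fd = open(path, O_RDONLY);
	__u64 i;

	/* first call: space_slots == 0 asks only for the entry count */
	if (fd < 0 || ioctl(fd, BTRFS_IOC_SPACE_INFO, &probe) < 0)
		return 1;

	args = calloc(1, sizeof(*args) +
		      probe.total_spaces * sizeof(struct btrfs_ioctl_space_info));
	if (!args)
		return 1;
	args->space_slots = probe.total_spaces;

	/* second call: the kernel fills spaces[] up to space_slots entries */
	if (ioctl(fd, BTRFS_IOC_SPACE_INFO, args) < 0)
		return 1;

	for (i = 0; i < args->total_spaces && i < args->space_slots; i++)
		printf("flags 0x%llx total %llu used %llu\n",
		       (unsigned long long)args->spaces[i].flags,
		       (unsigned long long)args->spaces[i].total_bytes,
		       (unsigned long long)args->spaces[i].used_bytes);

	free(args);
	close(fd);
	return 0;
}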
@@ -174,7 +174,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	if (!entry)
		return -ENOMEM;
-	mutex_lock(&tree->mutex);
 	entry->file_offset = file_offset;
 	entry->start = start;
 	entry->len = len;
@@ -190,16 +189,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	INIT_LIST_HEAD(&entry->list);
 	INIT_LIST_HEAD(&entry->root_extent_list);
+	spin_lock(&tree->lock);
 	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
 	BUG_ON(node);
+	spin_unlock(&tree->lock);
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
-	mutex_unlock(&tree->mutex);
 	BUG_ON(node);
 	return 0;
 }
@@ -216,9 +216,9 @@ int btrfs_add_ordered_sum(struct inode *inode,
 	struct btrfs_ordered_inode_tree *tree;
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	list_add_tail(&sum->list, &entry->list);
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return 0;
 }
@@ -232,15 +232,16 @@ int btrfs_add_ordered_sum(struct inode *inode,
  * to make sure this function only returns 1 once for a given ordered extent.
  */
 int btrfs_dec_test_ordered_pending(struct inode *inode,
+				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
-	struct btrfs_ordered_extent *entry;
+	struct btrfs_ordered_extent *entry = NULL;
 	int ret;
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node) {
		ret = 1;
@@ -264,7 +265,11 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
	else
		ret = 1;
 out:
-	mutex_unlock(&tree->mutex);
+	if (!ret && cached && entry) {
+		*cached = entry;
+		atomic_inc(&entry->refs);
+	}
+	spin_unlock(&tree->lock);
 	return ret == 0;
 }
@@ -291,7 +296,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * and you must wake_up entry->wait.  You must hold the tree mutex
+ * and you must wake_up entry->wait.  You must hold the tree lock
  * while you call this function.
  */
 static int __btrfs_remove_ordered_extent(struct inode *inode,
@@ -340,9 +345,9 @@ int btrfs_remove_ordered_extent(struct inode *inode,
 	int ret;
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	ret = __btrfs_remove_ordered_extent(inode, entry);
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	wake_up(&entry->wait);
 	return ret;
@@ -567,7 +572,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	struct btrfs_ordered_extent *entry = NULL;
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
		goto out;
@@ -578,7 +583,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
 	if (entry)
		atomic_inc(&entry->refs);
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return entry;
 }
@@ -594,7 +599,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 	struct btrfs_ordered_extent *entry = NULL;
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node)
		goto out;
@@ -602,7 +607,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
 	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 	atomic_inc(&entry->refs);
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
 	return entry;
 }
@@ -629,7 +634,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
	else
		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
 	disk_i_size = BTRFS_I(inode)->disk_i_size;
 	/* truncate file */
@@ -735,7 +740,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
	 */
	if (ordered)
		__btrfs_remove_ordered_extent(inode, ordered);
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
	if (ordered)
		wake_up(&ordered->wait);
	return ret;
@@ -762,7 +767,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
	if (!ordered)
		return 1;
-	mutex_lock(&tree->mutex);
+	spin_lock(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr) {
			num_sectors = ordered_sum->len / sectorsize;
@@ -777,7 +782,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
		}
	}
 out:
-	mutex_unlock(&tree->mutex);
+	spin_unlock(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return ret;
 }
...
@@ -21,7 +21,7 @@
 /* one of these per inode */
 struct btrfs_ordered_inode_tree {
-	struct mutex mutex;
+	spinlock_t lock;
 	struct rb_root tree;
 	struct rb_node *last;
 };
@@ -128,7 +128,7 @@ static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
 static inline void
 btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
 {
-	mutex_init(&t->mutex);
+	spin_lock_init(&t->lock);
 	t->tree = RB_ROOT;
 	t->last = NULL;
 }
@@ -137,7 +137,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
 int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry);
 int btrfs_dec_test_ordered_pending(struct inode *inode,
-			       u64 file_offset, u64 io_size);
+			       struct btrfs_ordered_extent **cached,
+			       u64 file_offset, u64 io_size);
 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int tyep);
 int btrfs_add_ordered_sum(struct inode *inode,
...
@@ -2659,7 +2659,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
					EXTENT_BOUNDARY, GFP_NOFS);
			nr++;
		}
-		btrfs_set_extent_delalloc(inode, page_start, page_end);
+		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
		set_page_dirty(page);
		dirty_page++;
@@ -3487,7 +3487,7 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
 	key.objectid = objectid;
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	inode = btrfs_iget(root->fs_info->sb, &key, root);
+	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
 	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
 	BTRFS_I(inode)->index_cnt = group->key.objectid;
...
This diff is collapsed.
@@ -997,13 +997,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	mutex_unlock(&root->fs_info->trans_mutex);
-	if (flush_on_commit) {
+	if (flush_on_commit || snap_pending) {
 		btrfs_start_delalloc_inodes(root, 1);
 		ret = btrfs_wait_ordered_extents(root, 0, 1);
 		BUG_ON(ret);
-	} else if (snap_pending) {
-		ret = btrfs_wait_ordered_extents(root, 0, 1);
-		BUG_ON(ret);
 	}
 	/*
...
@@ -445,7 +445,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root,
 	key.objectid = objectid;
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	inode = btrfs_iget(root->fs_info->sb, &key, root);
+	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
 	if (IS_ERR(inode)) {
		inode = NULL;
 	} else if (is_bad_inode(inode)) {
...
@@ -256,13 +256,13 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
			wake_up(&fs_info->async_submit_wait);
		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
-		submit_bio(cur->bi_rw, cur);
-		num_run++;
-		batch_run++;
		if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
			num_sync_run++;
+		submit_bio(cur->bi_rw, cur);
+		num_run++;
+		batch_run++;
		if (need_resched()) {
			if (num_sync_run) {
				blk_run_backing_dev(bdi, NULL);
@@ -325,16 +325,6 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
		num_sync_run = 0;
		blk_run_backing_dev(bdi, NULL);
	}
-	cond_resched();
-	if (again)
-		goto loop;
-	spin_lock(&device->io_lock);
-	if (device->pending_bios.head || device->pending_sync_bios.head)
-		goto loop_lock;
-	spin_unlock(&device->io_lock);
 	/*
	 * IO has already been through a long path to get here.  Checksumming,
	 * async helper threads, perhaps compression.  We've done a pretty
@@ -346,6 +336,16 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
	 * cared about found its way down here.
	 */
 	blk_run_backing_dev(bdi, NULL);
+	cond_resched();
+	if (again)
+		goto loop;
+	spin_lock(&device->io_lock);
+	if (device->pending_bios.head || device->pending_sync_bios.head)
+		goto loop_lock;
+	spin_unlock(&device->io_lock);
 done:
 	return 0;
 }
@@ -365,6 +365,7 @@ static noinline int device_list_add(const char *path,
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *fs_devices;
 	u64 found_transid = btrfs_super_generation(disk_super);
+	char *name;
 	fs_devices = find_fsid(disk_super->fsid);
 	if (!fs_devices) {
@@ -411,6 +412,12 @@ static noinline int device_list_add(const char *path,
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
+	} else if (strcmp(device->name, path)) {
+		name = kstrdup(path, GFP_NOFS);
+		if (!name)
+			return -ENOMEM;
+		kfree(device->name);
+		device->name = name;
	}
 	if (found_transid > fs_devices->latest_trans) {
@@ -592,7 +599,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
			goto error_close;
		disk_super = (struct btrfs_super_block *)bh->b_data;
-		devid = le64_to_cpu(disk_super->dev_item.devid);
+		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;
@@ -694,7 +701,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
		goto error_close;
	}
 	disk_super = (struct btrfs_super_block *)bh->b_data;
-	devid = le64_to_cpu(disk_super->dev_item.devid);
+	devid = btrfs_stack_device_id(&disk_super->dev_item);
 	transid = btrfs_super_generation(disk_super);
 	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
@@ -1187,7 +1194,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
		goto error_close;
	}
 	disk_super = (struct btrfs_super_block *)bh->b_data;
-	devid = le64_to_cpu(disk_super->dev_item.devid);
+	devid = btrfs_stack_device_id(&disk_super->dev_item);
 	dev_uuid = disk_super->dev_item.uuid;
 	device = btrfs_find_device(root, devid, dev_uuid,
				   disk_super->fsid);
...