Commit b695188d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs update from Chris Mason:
 "The biggest feature in the pull is the new (and still experimental)
  raid56 code that David Woodhouse started long ago.  I'm still working
  on the parity logging setup that will avoid inconsistent parity after
  a crash, so this is only for testing right now.  But, I'd really like
  to get it out to a broader audience to hammer out any performance
  issues or other problems.

  scrub does not yet correct errors on raid5/6 either.

  Josef has another pass at fsync performance.  The big change here is
  to combine waiting for metadata with waiting for data, which is a big
  latency win.  It is also step one toward using atomics from the
  hardware during a commit.

  Mark Fasheh has a new way to use btrfs send/receive to send only the
  metadata changes.  SUSE is using this to make snapper more efficient
   at finding changes between snapshots.

  Snapshot-aware defrag is also included.

  Otherwise we have a large number of fixes and cleanups.  Eric Sandeen
  wins the award for removing the most lines, and I'm hoping we steal
  this idea from XFS over and over again."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (118 commits)
  btrfs: fixup/remove module.h usage as required
  Btrfs: delete inline extents when we find them during logging
  btrfs: try harder to allocate raid56 stripe cache
  Btrfs: cleanup to make the function btrfs_delalloc_reserve_metadata more logic
  Btrfs: don't call btrfs_qgroup_free if just btrfs_qgroup_reserve fails
  Btrfs: remove reduplicate check about root in the function btrfs_clean_quota_tree
  Btrfs: return ENOMEM rather than use BUG_ON when btrfs_alloc_path fails
  Btrfs: fix missing deleted items in btrfs_clean_quota_tree
  btrfs: use only inline_pages from extent buffer
  Btrfs: fix wrong reserved space when deleting a snapshot/subvolume
  Btrfs: fix wrong reserved space in qgroup during snap/subv creation
  Btrfs: remove unnecessary dget_parent/dput when creating the pending snapshot
  btrfs: remove a printk from scan_one_device
  Btrfs: fix NULL pointer after aborting a transaction
  Btrfs: fix memory leak of log roots
  Btrfs: copy everything if we've created an inline extent
  btrfs: cleanup for open-coded alignment
  Btrfs: do not change inode flags in rename
  Btrfs: use reserved space for creating a snapshot
  clear chunk_alloc flag on retryable failure
  ...
parents 48476df9 180e001c
@@ -5,6 +5,9 @@ config BTRFS_FS
 	select ZLIB_DEFLATE
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
+	select RAID6_PQ
+	select XOR_BLOCKS
 	help
 	  Btrfs is a new filesystem with extents, writable snapshotting,
 	  support for multiple devices and many more features.
......
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o

 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
@@ -352,11 +352,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 		err = __resolve_indirect_ref(fs_info, search_commit_root,
 					     time_seq, ref, parents,
 					     extent_item_pos);
-		if (err) {
-			if (ret == 0)
-				ret = err;
+		if (err)
 			continue;
-		}

 		/* we put the first parent into the ref at hand */
 		ULIST_ITER_INIT(&uiter);
......
@@ -19,7 +19,7 @@
 #ifndef __BTRFS_BACKREF__
 #define __BTRFS_BACKREF__

-#include "ioctl.h"
+#include <linux/btrfs.h>
 #include "ulist.h"
 #include "extent_io.h"
......
@@ -40,6 +40,8 @@
 #define BTRFS_INODE_HAS_ASYNC_EXTENT		6
 #define BTRFS_INODE_NEEDS_FULL_SYNC		7
 #define BTRFS_INODE_COPY_EVERYTHING		8
+#define BTRFS_INODE_IN_DELALLOC_LIST		9
+#define BTRFS_INODE_READDIO_NEED_LOCK		10

 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -216,4 +218,22 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
 	return 0;
 }

+/*
+ * Disable DIO read nolock optimization, so new dio readers will be forced
+ * to grab i_mutex. It is used to avoid the endless truncate due to
+ * nonlocked dio read.
+ */
+static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
+{
+	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
+	smp_mb();
+}
+
+static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+		  &BTRFS_I(inode)->runtime_flags);
+}
+
 #endif
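The two helpers above implement a simple flag-plus-barrier handshake: a task about to shrink an inode sets BTRFS_INODE_READDIO_NEED_LOCK so that new direct-IO readers fall back to taking i_mutex, and clears it again once the size change is done. A hedged sketch of how the pair is meant to bracket a size change; the inode_dio_wait()/truncate_setsize() steps and the surrounding function are assumptions for illustration, not taken from this hunk:

/*
 * Hypothetical sketch: keep lockless DIO readers out while an inode is
 * being truncated. Assumes the caller already holds i_mutex.
 */
static void example_shrink_isize(struct inode *inode, loff_t newsize)
{
	/* new lockless DIO readers will now take i_mutex instead */
	btrfs_inode_block_unlocked_dio(inode);

	/* wait for DIO reads that are already in flight */
	inode_dio_wait(inode);

	/* assumed truncate step for the sketch */
	truncate_setsize(inode, newsize);

	/* allow lockless DIO reads again */
	btrfs_inode_resume_unlocked_dio(inode);
}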
@@ -813,8 +813,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		(bh->b_data + (dev_bytenr & 4095));

 	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
-	    strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
-		    sizeof(super_tmp->magic)) ||
+	    super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
 	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
 	    btrfs_super_nodesize(super_tmp) != state->metablock_size ||
 	    btrfs_super_leafsize(super_tmp) != state->metablock_size ||
......
@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
-			ret = io_tree->ops->merge_bio_hook(page, 0,
+			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
 							   PAGE_CACHE_SIZE,
 							   bio, 0);
 		else
@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		page->index = em_start >> PAGE_CACHE_SHIFT;

 		if (comp_bio->bi_size)
-			ret = tree->ops->merge_bio_hook(page, 0,
+			ret = tree->ops->merge_bio_hook(READ, page, 0,
 							PAGE_CACHE_SIZE,
 							comp_bio, 0);
 		else
......
@@ -1138,6 +1138,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
 		switch (tm->op) {
 		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
 			BUG_ON(tm->slot < n);
+			/* Fallthrough */
 		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
 		case MOD_LOG_KEY_REMOVE:
 			btrfs_set_node_key(eb, &tm->key, tm->slot);
@@ -1222,7 +1223,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
-		BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
+		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

 	return eb_rewin;
 }
@@ -1441,7 +1442,7 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
 */
 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct extent_buffer *parent,
-		       int start_slot, int cache_only, u64 *last_ret,
+		       int start_slot, u64 *last_ret,
 		       struct btrfs_key *progress)
 {
 	struct extent_buffer *cur;
@@ -1461,8 +1462,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	struct btrfs_disk_key disk_key;

 	parent_level = btrfs_header_level(parent);
-	if (cache_only && parent_level != 1)
-		return 0;

 	WARN_ON(trans->transaction != root->fs_info->running_transaction);
 	WARN_ON(trans->transid != root->fs_info->generation);
@@ -1508,10 +1507,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		else
 			uptodate = 0;
 		if (!cur || !uptodate) {
-			if (cache_only) {
-				free_extent_buffer(cur);
-				continue;
-			}
 			if (!cur) {
 				cur = read_tree_block(root, blocknr,
 							 blocksize, gen);
@@ -4825,8 +4820,8 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)

 /*
  * A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are either in cache or have a minimum
- * transaction id. This is used by the btree defrag code, and tree logging
+ * for nodes or leaves that are have a minimum transaction id.
+ * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
@@ -4847,7 +4842,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
 */
 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
-			 struct btrfs_path *path, int cache_only,
+			 struct btrfs_path *path,
			 u64 min_trans)
 {
 	struct extent_buffer *cur;
@@ -4887,15 +4882,12 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 		if (sret && slot > 0)
 			slot--;
 		/*
-		 * check this node pointer against the cache_only and
-		 * min_trans parameters. If it isn't in cache or is too
-		 * old, skip to the next one.
+		 * check this node pointer against the min_trans parameters.
+		 * If it is too old, old, skip to the next one.
 		 */
 		while (slot < nritems) {
 			u64 blockptr;
 			u64 gen;
-			struct extent_buffer *tmp;
-			struct btrfs_disk_key disk_key;

 			blockptr = btrfs_node_blockptr(cur, slot);
 			gen = btrfs_node_ptr_generation(cur, slot);
@@ -4903,27 +4895,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 				slot++;
 				continue;
 			}
-			if (!cache_only)
-				break;
-
-			if (max_key) {
-				btrfs_node_key(cur, &disk_key, slot);
-				if (comp_keys(&disk_key, max_key) >= 0) {
-					ret = 1;
-					goto out;
-				}
-			}
-
-			tmp = btrfs_find_tree_block(root, blockptr,
-					    btrfs_level_size(root, level - 1));
-
-			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
-				free_extent_buffer(tmp);
-				break;
-			}
-			if (tmp)
-				free_extent_buffer(tmp);
-			slot++;
+			break;
 		}
 find_next_key:
		/*
@@ -4934,7 +4906,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
-						  cache_only, min_trans);
+						  min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
@@ -5399,8 +5371,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 /*
  * this is similar to btrfs_next_leaf, but does not try to preserve
  * and fixup the path. It looks for and returns the next key in the
- * tree based on the current path and the cache_only and min_trans
- * parameters.
+ * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
@@ -5409,8 +5380,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 * calling this function.
 */
 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
-			struct btrfs_key *key, int level,
-			int cache_only, u64 min_trans)
+			struct btrfs_key *key, int level, u64 min_trans)
 {
	int slot;
	struct extent_buffer *c;
@@ -5461,22 +5431,8 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
-			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

-			if (cache_only) {
-				struct extent_buffer *cur;
-				cur = btrfs_find_tree_block(root, blockptr,
-					    btrfs_level_size(root, level - 1));
-				if (!cur ||
-				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
-					slot++;
-					if (cur)
-						free_extent_buffer(cur);
-					goto next;
-				}
-				free_extent_buffer(cur);
-			}
			if (gen < min_trans) {
				slot++;
				goto next;
......
This diff is collapsed.
@@ -875,7 +875,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_item *delayed_item)
 {
	struct extent_buffer *leaf;
-	struct btrfs_item *item;
	char *ptr;
	int ret;
@@ -886,7 +885,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,

	leaf = path->nodes[0];

-	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
@@ -1065,7 +1063,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
	}
 }

-static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
@@ -1075,22 +1073,15 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
	struct extent_buffer *leaf;
	int ret;

-	mutex_lock(&node->mutex);
-	if (!node->inode_dirty) {
-		mutex_unlock(&node->mutex);
-		return 0;
-	}
-
	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
-		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
-		mutex_unlock(&node->mutex);
		return ret;
	}
@@ -1105,11 +1096,47 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
-	mutex_unlock(&node->mutex);

	return 0;
 }

+static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+					     struct btrfs_root *root,
+					     struct btrfs_path *path,
+					     struct btrfs_delayed_node *node)
+{
+	int ret;
+
+	mutex_lock(&node->mutex);
+	if (!node->inode_dirty) {
+		mutex_unlock(&node->mutex);
+		return 0;
+	}
+
+	ret = __btrfs_update_delayed_inode(trans, root, path, node);
+	mutex_unlock(&node->mutex);
+	return ret;
+}
+
+static inline int
+__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+				   struct btrfs_path *path,
+				   struct btrfs_delayed_node *node)
+{
+	int ret;
+
+	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+	if (ret)
+		return ret;
+
+	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+	if (ret)
+		return ret;
+
+	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+	return ret;
+}
+
 /*
  * Called when committing the transaction.
  * Returns 0 on success.
@@ -1119,7 +1146,6 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
 {
-	struct btrfs_root *curr_root = root;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
@@ -1142,15 +1168,8 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
-		curr_root = curr_node->root;
-		ret = btrfs_insert_delayed_items(trans, path, curr_root,
-						 curr_node);
-		if (!ret)
-			ret = btrfs_delete_delayed_items(trans, path,
-							 curr_root, curr_node);
-		if (!ret)
-			ret = btrfs_update_delayed_inode(trans, curr_root,
-							 path, curr_node);
+		ret = __btrfs_commit_inode_delayed_items(trans, path,
+							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
@@ -1183,51 +1202,93 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
	return __btrfs_run_delayed_items(trans, root, nr);
 }

-static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
-					      struct btrfs_delayed_node *node)
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+				     struct inode *inode)
 {
+	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

+	if (!delayed_node)
+		return 0;
+
+	mutex_lock(&delayed_node->mutex);
+	if (!delayed_node->count) {
+		mutex_unlock(&delayed_node->mutex);
+		btrfs_release_delayed_node(delayed_node);
+		return 0;
+	}
+	mutex_unlock(&delayed_node->mutex);
+
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
-	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
+	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

-	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
-	if (!ret)
-		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
-	if (!ret)
-		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
-	btrfs_free_path(path);
+	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
+
+	btrfs_release_delayed_node(delayed_node);
+	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
	return ret;
 }

-int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
-				     struct inode *inode)
+int btrfs_commit_inode_delayed_inode(struct inode *inode)
 {
+	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+	struct btrfs_path *path;
+	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
-	if (!delayed_node->count) {
+	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

-	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+	trans = btrfs_join_transaction(delayed_node->root);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto trans_out;
+	}
+	path->leave_spinning = 1;
+
+	block_rsv = trans->block_rsv;
+	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+
+	mutex_lock(&delayed_node->mutex);
+	if (delayed_node->inode_dirty)
+		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
+						   path, delayed_node);
+	else
+		ret = 0;
+	mutex_unlock(&delayed_node->mutex);
+
+	btrfs_free_path(path);
+	trans->block_rsv = block_rsv;
+trans_out:
+	btrfs_end_transaction(trans, delayed_node->root);
+	btrfs_btree_balance_dirty(delayed_node->root);
+out:
	btrfs_release_delayed_node(delayed_node);
	return ret;
 }
@@ -1258,7 +1319,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int need_requeue = 0;
-	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);
@@ -1277,14 +1337,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

-	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
-	if (!ret)
-		ret = btrfs_delete_delayed_items(trans, path, root,
-						 delayed_node);
-
-	if (!ret)
-		btrfs_update_delayed_inode(trans, root, path, delayed_node);
+	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	/*
	 * Maybe new delayed items have been inserted, so we need requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
......
@@ -117,6 +117,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 /* Used for evicting the inode. */
 void btrfs_remove_delayed_node(struct inode *inode);
 void btrfs_kill_delayed_inode_items(struct inode *inode);
+int btrfs_commit_inode_delayed_inode(struct inode *inode);

 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
......
@@ -23,6 +23,10 @@
 #include "delayed-ref.h"
 #include "transaction.h"

+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking.  For subvolume trees
  * we queue up extent allocations and backref maintenance for
@@ -422,6 +426,14 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
	return 1;
 }

+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, cluster)
+		list_del_init(pos);
+}
+
 /*
  * helper function to update an extent delayed ref in the
  * rbtree.  existing and update must both have the same
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
-			kfree(ref->extent_op);
+			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
 /*
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
-		kfree(head_ref);
+		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
-		kfree(full_ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}
@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
-	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
-		kfree(ref);
+		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}
@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

-	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;
@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
 }
+
+void btrfs_delayed_ref_exit(void)
+{
+	if (btrfs_delayed_ref_head_cachep)
+		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+	if (btrfs_delayed_tree_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+	if (btrfs_delayed_data_ref_cachep)
+		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	if (btrfs_delayed_extent_op_cachep)
+		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+	btrfs_delayed_ref_head_cachep = kmem_cache_create(
+				"btrfs_delayed_ref_head",
+				sizeof(struct btrfs_delayed_ref_head), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_ref_head_cachep)
+		goto fail;
+
+	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_tree_ref",
+				sizeof(struct btrfs_delayed_tree_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_tree_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_data_ref_cachep = kmem_cache_create(
+				"btrfs_delayed_data_ref",
+				sizeof(struct btrfs_delayed_data_ref), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_data_ref_cachep)
+		goto fail;
+
+	btrfs_delayed_extent_op_cachep = kmem_cache_create(
+				"btrfs_delayed_extent_op",
+				sizeof(struct btrfs_delayed_extent_op), 0,
+				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+	if (!btrfs_delayed_extent_op_cachep)
+		goto fail;
+
+	return 0;
+fail:
+	btrfs_delayed_ref_exit();
+	return -ENOMEM;
+}
@@ -131,6 +131,15 @@ struct btrfs_delayed_ref_root {
	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

+	/*
+	 * bumped when someone is making progress on the delayed
+	 * refs, so that other procs know they are just adding to
+	 * contention intead of helping
+	 */
+	atomic_t procs_running_refs;
+	atomic_t ref_seq;
+	wait_queue_head_t wait;
+
	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
@@ -141,12 +150,47 @@ struct btrfs_delayed_ref_root {
	u64 run_delayed_start;
 };

+extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
+extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+
+int btrfs_delayed_ref_init(void);
+void btrfs_delayed_ref_exit(void);
+
+static inline struct btrfs_delayed_extent_op *
+btrfs_alloc_delayed_extent_op(void)
+{
+	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+}
+
+static inline void
+btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
+{
+	if (op)
+		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
+}
+
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
-		kfree(ref);
+		switch (ref->type) {
+		case BTRFS_TREE_BLOCK_REF_KEY:
+		case BTRFS_SHARED_BLOCK_REF_KEY:
+			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+			break;
+		case BTRFS_EXTENT_DATA_REF_KEY:
+		case BTRFS_SHARED_DATA_REF_KEY:
+			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+			break;
+		case 0:
+			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+			break;
+		default:
+			BUG();
+		}
	}
 }
@@ -176,8 +220,14 @@ struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+	mutex_unlock(&head->mutex);
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
......
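With the slab caches in place, extent_op allocations now go through the inline wrappers above instead of kmalloc()/kfree(). A hedged sketch of the intended alloc/submit/free pairing; the fields set on the op and the exact argument order of btrfs_add_delayed_extent_op() are assumptions taken from the declarations in this merge, not a verbatim call site:

/* Illustrative only: queue a flags update through a slab-backed extent_op. */
static int example_queue_extent_op(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_extent_op *op;
	int ret;

	op = btrfs_alloc_delayed_extent_op();	/* kmem_cache backed */
	if (!op)
		return -ENOMEM;

	op->update_flags = 1;			/* illustrative field use */
	op->is_data = 0;

	/* assumed argument order: (fs_info, trans, bytenr, num_bytes, op) */
	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
					  num_bytes, op);
	if (ret)
		btrfs_free_delayed_extent_op(op);	/* NULL-safe free */
	return ret;
}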
@@ -465,7 +465,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
	 * flush all outstanding I/O and inode extent mappings before the
	 * copy operation is declared as being finished
	 */
-	btrfs_start_delalloc_inodes(root, 0);
+	ret = btrfs_start_delalloc_inodes(root, 0);
+	if (ret) {
+		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+		return ret;
+	}
	btrfs_wait_ordered_extents(root, 0);

	trans = btrfs_start_transaction(root, 0);
......
This diff is collapsed.
@@ -25,6 +25,13 @@
 #define BTRFS_SUPER_MIRROR_MAX	 3
 #define BTRFS_SUPER_MIRROR_SHIFT 12

+enum {
+	BTRFS_WQ_ENDIO_DATA = 0,
+	BTRFS_WQ_ENDIO_METADATA = 1,
+	BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+	BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
 static inline u64 btrfs_sb_offset(int mirror)
 {
	u64 start = 16 * 1024;
......
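The new enum gives names to the workqueue classes that callers pass to the end-io helper, and adds a dedicated class so raid56 completions can be routed to their own workers. A hedged sketch of how a submit path might pick a class; btrfs_bio_wq_end_io() and its argument order are assumed from the existing helper in this header, not shown in the hunk above:

/* Hypothetical: choose the end-io workqueue class by bio type. */
static int example_wq_endio_class(struct btrfs_fs_info *fs_info,
				  struct bio *bio, int is_metadata,
				  int is_raid56)
{
	int wq_class;

	if (is_raid56)
		wq_class = BTRFS_WQ_ENDIO_RAID56;
	else if (is_metadata)
		wq_class = BTRFS_WQ_ENDIO_METADATA;
	else
		wq_class = BTRFS_WQ_ENDIO_DATA;

	/* assumed signature: (fs_info, bio, workqueue class) */
	return btrfs_bio_wq_end_io(fs_info, bio, wq_class);
}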
This diff is collapsed.
This diff is collapsed.
@@ -72,10 +72,9 @@ struct extent_io_ops {
	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
	extent_submit_bio_hook_t *submit_bio_hook;
-	int (*merge_bio_hook)(struct page *page, unsigned long offset,
+	int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
			      size_t size, struct bio *bio,
			      unsigned long bio_flags);
-	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
				    struct extent_state *state, int mirror);
@@ -90,8 +89,6 @@ struct extent_io_ops {
			      struct extent_state *other);
	void (*split_extent_hook)(struct inode *inode,
				  struct extent_state *orig, u64 split);
-	int (*write_cache_pages_lock_hook)(struct page *page, void *data,
-					   void (*flush_fn)(void *));
 };

 struct extent_io_tree {
@@ -161,8 +158,7 @@ struct extent_buffer {
	 */
	wait_queue_head_t read_lock_wq;
	wait_queue_head_t lock_wq;
-	struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
-	struct page **pages;
+	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 };

 static inline void extent_set_compress_type(unsigned long *bio_flags,
......
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include "ctree.h"
......
@@ -684,6 +684,24 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
	return ret;
 }

+static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
+				 struct btrfs_sector_sum *sector_sum,
+				 u64 total_bytes, u64 sectorsize)
+{
+	u64 tmp = sectorsize;
+	u64 next_sector = sector_sum->bytenr;
+	struct btrfs_sector_sum *next = sector_sum + 1;
+
+	while ((tmp + total_bytes) < sums->len) {
+		if (next_sector + sectorsize != next->bytenr)
+			break;
+		tmp += sectorsize;
+		next_sector = next->bytenr;
+		next++;
+	}
+	return tmp;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
		goto insert;
	}

-	if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
+	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
-		u32 diff = (csum_offset + 1) * csum_size;
+		int extend_nr;
+		u64 tmp;
+		u32 diff;
+		u32 free_space;

-		/*
-		 * is the item big enough already?  we dropped our lock
-		 * before and need to recheck
-		 */
-		if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
-			goto csum;
+		if (btrfs_leaf_free_space(root, leaf) <
+				 sizeof(struct btrfs_item) + csum_size * 2)
+			goto insert;
+
+		free_space = btrfs_leaf_free_space(root, leaf) -
+					 sizeof(struct btrfs_item) - csum_size;
+		tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+					    root->sectorsize);
+		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		WARN_ON(tmp < 1);
+
+		extend_nr = max_t(int, 1, (int)tmp);
+		diff = (csum_offset + extend_nr) * csum_size;
+		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
-		if (diff != csum_size)
-			goto insert;
+		diff = min(free_space, diff);
+		diff /= csum_size;
+		diff *= csum_size;

		btrfs_extend_item(trans, root, path, diff);
		goto csum;
@@ -812,19 +842,14 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
-		u64 tmp = total_bytes + root->sectorsize;
-		u64 next_sector = sector_sum->bytenr;
-		struct btrfs_sector_sum *next = sector_sum + 1;
+		u64 tmp;

-		while (tmp < sums->len) {
-			if (next_sector + root->sectorsize != next->bytenr)
-				break;
-			tmp += root->sectorsize;
-			next_sector = next->bytenr;
-			next++;
-		}
-		tmp = min(tmp, next_offset - file_key.offset);
+		tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+					    root->sectorsize);
		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		tmp = min(tmp, (next_offset - file_key.offset) >>
+			  root->fs_info->sb->s_blocksize_bits);
		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
......
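The item-extension math above boils down to: estimate how many more contiguous sectors of checksums are pending, turn that into bytes of item growth, then clamp to both the per-item maximum (MAX_CSUM_ITEMS) and the leaf's free space, rounding down to a whole number of checksums. A standalone sketch of that clamping arithmetic with made-up numbers; the function name and the simplified leaf accounting are illustrative, not the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the csum-item growth clamping shown above. */
static uint32_t csum_extend_bytes(uint32_t csum_offset, uint32_t extend_nr,
				  uint32_t csum_size, uint32_t item_size,
				  uint32_t max_item, uint32_t free_space)
{
	uint32_t diff = (csum_offset + extend_nr) * csum_size;

	if (diff > max_item)		/* never grow past the item limit */
		diff = max_item;
	diff -= item_size;		/* only the growth, not the whole item */
	if (diff > free_space)		/* never overflow the leaf */
		diff = free_space;
	return (diff / csum_size) * csum_size;	/* whole checksums only */
}

int main(void)
{
	/* e.g. item already holds 100 csums of 4 bytes, 57 sectors pending */
	printf("%u\n", csum_extend_bytes(100, 57, 4, 400, 16128, 120));
	return 0;	/* prints 120: limited by leaf free space */
}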
@@ -30,11 +30,11 @@
 #include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
+#include <linux/btrfs.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "tree-log.h"
 #include "locking.h"
@@ -374,6 +374,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
	atomic_inc(&fs_info->defrag_running);
	while(1) {
+		/* Pause the auto defragger. */
+		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+			     &fs_info->fs_state))
+			break;
+
		if (!__need_auto_defrag(fs_info->tree_root))
			break;
@@ -505,8 +510,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
-	num_bytes = (write_bytes + pos - start_pos +
-		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1544,7 +1548,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
-	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
@@ -1627,7 +1631,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
-		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+		struct btrfs_trans_handle *trans;
+		struct btrfs_root *root = BTRFS_I(inode)->root;
+
+		/*
+		 * We need to block on a committing transaction to keep us from
+		 * throwing a ordered operation on to the list and causing
+		 * something like sync to deadlock trying to flush out this
+		 * inode.
+		 */
+		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
+		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
+		btrfs_end_transaction(trans, root);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
@@ -1654,16 +1671,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;
+	bool full_sync = 0;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * out of the ->i_mutex. If so, we can flush the dirty pages by
-	 * multi-task, and make the performance up.
+	 * multi-task, and make the performance up.  See
+	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+			     &BTRFS_I(inode)->runtime_flags))
+		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;
@@ -1675,6 +1697,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	 * range being left.
	 */
	atomic_inc(&root->log_batch);
+	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+			     &BTRFS_I(inode)->runtime_flags);
+	if (full_sync)
-	btrfs_wait_ordered_range(inode, start, end - start + 1);
+		btrfs_wait_ordered_range(inode, start, end - start + 1);
	atomic_inc(&root->log_batch);
@@ -1742,14 +1767,26 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
+			/*
+			 * If we didn't already wait for ordered extents we need
+			 * to do that now.
+			 */
+			if (!full_sync)
+				btrfs_wait_ordered_range(inode, start,
+							 end - start + 1);
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
-			if (ret == 0)
+			if (ret == 0) {
				ret = btrfs_end_transaction(trans, root);
-			else
+			} else {
+				if (!full_sync)
+					btrfs_wait_ordered_range(inode, start,
+								 end -
+								 start + 1);
				ret = btrfs_commit_transaction(trans, root);
+			}
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
......
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

+	max_bitmaps = max(max_bitmaps, 1);
+
	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }

 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+		unsigned long align)
 {
	struct btrfs_free_space *entry;
	struct rb_node *node;
+	u64 ctl_off;
+	u64 tmp;
+	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
		if (entry->bytes < *bytes)
			continue;

+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
				return entry;
+			}
			continue;
		}

-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
		return entry;
	}
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
	}

	/*
-	 * some block groups are so tiny they can't be enveloped by a bitmap, so
-	 * don't even bother to create a bitmap for this
+	 * The original block groups from mkfs can be really small, like 8
+	 * megabytes, so don't bother with a bitmap for those entries.  However
+	 * some block groups can be smaller than what a bitmap would cover but
+	 * are still large enough that they could overflow the 32k memory limit,
+	 * so allow those block groups to still be allowed to have a bitmap
+	 * entry.
	 */
-	if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
@@ -2095,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
	if (!entry)
		goto out;
@@ -2107,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
@@ -2119,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
	spin_unlock(&ctl->tree_lock);

+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
	return ret;
 }
......
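find_free_space() now rounds a candidate offset up to the requested alignment (full_stripe_len for raid5/6 block groups) and treats the skipped bytes as an alignment gap that is handed back to the free-space tree afterwards. The round-up itself is ordinary integer arithmetic; a small standalone sketch with example numbers (the function and variable names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Round entry_offset up to the next multiple of 'align' measured from
 * ctl_start, mirroring the ctl_off/do_div() calculation above. */
static uint64_t align_offset(uint64_t entry_offset, uint64_t ctl_start,
			     uint64_t align)
{
	uint64_t ctl_off = entry_offset - ctl_start;
	uint64_t tmp = (ctl_off + align - 1) / align;	/* round up */

	return tmp * align + ctl_start;
}

int main(void)
{
	/* free space starts 70 KiB into the group, full stripe is 64 KiB */
	uint64_t off = align_offset(70 * 1024, 0, 64 * 1024);

	printf("aligned offset: %llu, gap handed back: %llu\n",
	       (unsigned long long)off,
	       (unsigned long long)(off - 70 * 1024));
	return 0;	/* 131072 and 59392: the gap stays free space */
}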
This diff is collapsed.
This diff is collapsed.
@@ -113,11 +113,10 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
			read_unlock(&eb->lock);
			return;
		}
-	read_unlock(&eb->lock);
-	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
-	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
+		wait_event(eb->write_lock_wq,
+			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
......
This diff is collapsed.
@@ -79,6 +79,8 @@ struct btrfs_ordered_sum {
 #define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
				       * has done its due diligence in updating
				       * the isize. */
+#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this ordered
				       ordered extent */

 struct btrfs_ordered_extent {
	/* logical offset in the file */
@@ -96,6 +98,9 @@ struct btrfs_ordered_extent {
	/* number of bytes that still need writing */
	u64 bytes_left;

+	/* number of bytes that still need csumming */
+	u64 csum_bytes_left;
+
	/*
	 * the end of the ordered extent which is behind it but
	 * didn't update disk_i_size. Please see the comment of
@@ -118,6 +123,9 @@ struct btrfs_ordered_extent {
	/* list of checksums for insertion when the extent io is done */
	struct list_head list;

+	/* If we need to wait on this to be done */
+	struct list_head log_list;
+
	/* used to wait for the BTRFS_ORDERED_COMPLETE bit */
	wait_queue_head_t wait;
@@ -189,11 +197,15 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
				struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *root, int wait);
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode);
 void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
 int __init ordered_data_init(void);
 void ordered_data_exit(void);
 #endif
@@ -294,6 +294,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
			       btrfs_dev_extent_chunk_offset(l, dev_extent),
			       (unsigned long long)
			       btrfs_dev_extent_length(l, dev_extent));
+			break;
		case BTRFS_DEV_STATS_KEY:
			printk(KERN_INFO "\t\tdevice stats\n");
			break;
......
This diff is collapsed.
This diff is collapsed.
/*
 * Copyright (C) 2012 Fusion-io All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_RAID56__
#define __BTRFS_RAID56__

static inline int nr_parity_stripes(struct map_lookup *map)
{
	if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		return 1;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	else
		return 0;
}

static inline int nr_data_stripes(struct map_lookup *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}

#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)

#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
			     ((x) == RAID6_Q_STRIPE))

int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 *raid_map,
			  u64 stripe_len, int mirror_num);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 *raid_map,
			u64 stripe_len);

int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
#endif
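nr_parity_stripes()/nr_data_stripes() simply split a chunk's stripes into data and parity: one parity stripe for raid5, two (P and Q) for raid6. For example, a raid6 chunk striped over 6 devices has 2 parity stripes and 4 data stripes, so a full stripe covers 4 * stripe_len bytes of data. A standalone illustration of the same accounting; map_lookup and the block-group flags are mocked here, not the kernel definitions:

#include <stdio.h>

#define EX_RAID5 (1 << 0)	/* stand-ins for BTRFS_BLOCK_GROUP_RAID5/6 */
#define EX_RAID6 (1 << 1)

struct ex_map { unsigned long type; int num_stripes; };

static int ex_nr_parity(const struct ex_map *map)
{
	if (map->type & EX_RAID5)
		return 1;
	if (map->type & EX_RAID6)
		return 2;
	return 0;
}

int main(void)
{
	struct ex_map m = { .type = EX_RAID6, .num_stripes = 6 };
	int data = m.num_stripes - ex_nr_parity(&m);

	/* with a 64K stripe_len, one full stripe holds data * 64K bytes */
	printf("parity=%d data=%d full-stripe=%dK\n",
	       ex_nr_parity(&m), data, data * 64);
	return 0;
}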
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -86,6 +86,7 @@ enum btrfs_send_cmd {
	BTRFS_SEND_C_UTIMES,

	BTRFS_SEND_C_END,
+	BTRFS_SEND_C_UPDATE_EXTENT,
	__BTRFS_SEND_C_MAX,
 };
 #define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.