Commit e942f883 authored by Chris Mason

Merge branch 'raid56-experimental' into for-linus-3.9

Signed-off-by: Chris Mason <chris.mason@fusionio.com>

Conflicts:
	fs/btrfs/ctree.h
	fs/btrfs/extent-tree.c
	fs/btrfs/inode.c
	fs/btrfs/volumes.c
parents b2c6b3e0 0e4e0263
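This merge brings the experimental RAID5/6 code into btrfs for 3.9. RAID5 keeps one XOR parity stripe per full stripe and RAID6 adds a second (Q) syndrome over GF(2^8), which is why the first Kconfig hunk below selects RAID6_PQ and XOR_BLOCKS. As a minimal user-space sketch of the RAID5 half of that idea (hypothetical buffer names and sizes; the in-tree raid56.c uses the XOR_BLOCKS/RAID6_PQ library routines instead):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_DATA    3   /* hypothetical: data stripes per full stripe */
#define STRIPE_LEN 8   /* hypothetical: bytes per stripe for the demo */

/* RAID5 parity is the byte-wise XOR of all data stripes; losing any
 * single stripe lets us rebuild it by XOR-ing the survivors. */
static void xor_parity(uint8_t *parity, uint8_t data[][STRIPE_LEN], int nr)
{
	memset(parity, 0, STRIPE_LEN);
	for (int i = 0; i < nr; i++)
		for (int j = 0; j < STRIPE_LEN; j++)
			parity[j] ^= data[i][j];
}

int main(void)
{
	uint8_t data[NR_DATA][STRIPE_LEN] = { "stripe0", "stripe1", "stripe2" };
	uint8_t parity[STRIPE_LEN], rebuilt[STRIPE_LEN];

	xor_parity(parity, data, NR_DATA);

	/* simulate losing data[1]: XOR the parity with the survivors */
	memcpy(rebuilt, parity, STRIPE_LEN);
	for (int j = 0; j < STRIPE_LEN; j++)
		rebuilt[j] ^= data[0][j] ^ data[2][j];

	printf("rebuilt: %s\n", (char *)rebuilt); /* prints "stripe1" */
	return 0;
}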
@@ -6,6 +6,9 @@ config BTRFS_FS
 	select ZLIB_DEFLATE
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
+	select RAID6_PQ
+	select XOR_BLOCKS
 	help
 	  Btrfs is a new filesystem with extents, writable snapshotting,
 	  support for multiple devices and many more features.
...
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_size)
-			ret = io_tree->ops->merge_bio_hook(page, 0,
+			ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
 							   PAGE_CACHE_SIZE,
 							   bio, 0);
 		else
@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		page->index = em_start >> PAGE_CACHE_SHIFT;
 		if (comp_bio->bi_size)
-			ret = tree->ops->merge_bio_hook(page, 0,
+			ret = tree->ops->merge_bio_hook(READ, page, 0,
 							PAGE_CACHE_SIZE,
 							comp_bio, 0);
 		else
...
@@ -506,6 +506,7 @@ struct btrfs_super_block {
 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA	(1ULL << 5)
 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF	(1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56		(1ULL << 7)
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
@@ -515,6 +516,7 @@ struct btrfs_super_block {
 	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
 	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
 	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
+	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
 	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
 /*
@@ -956,6 +958,8 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID1		(1ULL << 4)
 #define BTRFS_BLOCK_GROUP_DUP		(1ULL << 5)
 #define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5		(1 << 7)
+#define BTRFS_BLOCK_GROUP_RAID6		(1 << 8)
 #define BTRFS_BLOCK_GROUP_RESERVED	BTRFS_AVAIL_ALLOC_BIT_SINGLE
 
 enum btrfs_raid_types {
@@ -964,6 +968,8 @@ enum btrfs_raid_types {
 	BTRFS_RAID_DUP,
 	BTRFS_RAID_RAID0,
 	BTRFS_RAID_SINGLE,
+	BTRFS_RAID_RAID5,
+	BTRFS_RAID_RAID6,
 	BTRFS_NR_RAID_TYPES
 };
@@ -973,6 +979,8 @@ enum btrfs_raid_types {
 #define BTRFS_BLOCK_GROUP_PROFILE_MASK	(BTRFS_BLOCK_GROUP_RAID0 |   \
 					 BTRFS_BLOCK_GROUP_RAID1 |   \
+					 BTRFS_BLOCK_GROUP_RAID5 |   \
+					 BTRFS_BLOCK_GROUP_RAID6 |   \
 					 BTRFS_BLOCK_GROUP_DUP |     \
 					 BTRFS_BLOCK_GROUP_RAID10)
 /*
@@ -1197,6 +1205,10 @@ struct btrfs_block_group_cache {
 	u64 flags;
 	u64 sectorsize;
 	u64 cache_generation;
+
+	/* for raid56, this is a full stripe, without parity */
+	unsigned long full_stripe_len;
+
 	unsigned int ro:1;
 	unsigned int dirty:1;
 	unsigned int iref:1;
@@ -1242,6 +1254,23 @@ enum btrfs_orphan_cleanup_state {
 	ORPHAN_CLEANUP_DONE	= 2,
 };
 
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+	struct list_head hash_list;
+	wait_queue_head_t wait;
+	spinlock_t lock;
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+	struct list_head stripe_cache;
+	spinlock_t cache_lock;
+	int cache_size;
+	struct btrfs_stripe_hash table[];
+};
+
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
 /* fs_info */
 struct reloc_control;
 struct btrfs_device;
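The table above has 1 << BTRFS_STRIPE_HASH_TABLE_BITS (2048) buckets, each carrying the lock and waitqueue for the rbios of one full-stripe start address. A hedged sketch of how a stripe's logical start might map to a bucket; the multiply-and-shift hash here is the classic hash_64() recipe and is an assumption, as the actual hash used by raid56.c is not shown in this diff:

#include <stdint.h>
#include <stdio.h>

#define BTRFS_STRIPE_HASH_TABLE_BITS 11

/* Map a full-stripe start (a logical byte address) to a bucket index
 * by multiplying with a large odd constant and keeping the top bits. */
static unsigned long stripe_bucket(uint64_t full_stripe_start)
{
	return (unsigned long)((full_stripe_start * 0x9e3779b97f4a7c15ULL) >>
			       (64 - BTRFS_STRIPE_HASH_TABLE_BITS));
}

int main(void)
{
	/* two I/Os against the same full stripe land on the same lock */
	printf("%lu %lu %lu\n", stripe_bucket(1 << 20),
	       stripe_bucket(1 << 20), stripe_bucket(3 << 20));
	return 0;
}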
@@ -1341,6 +1370,13 @@ struct btrfs_fs_info {
 	struct mutex cleaner_mutex;
 	struct mutex chunk_mutex;
 	struct mutex volume_mutex;
+
+	/* this is used during read/modify/write to make sure
+	 * no two ios are trying to mod the same stripe at the same
+	 * time
+	 */
+	struct btrfs_stripe_hash_table *stripe_hash_table;
+
 	/*
 	 * this protects the ordered operations list only while we are
 	 * processing all of the entries on it. This way we make
@@ -1423,6 +1459,8 @@ struct btrfs_fs_info {
 	struct btrfs_workers flush_workers;
 	struct btrfs_workers endio_workers;
 	struct btrfs_workers endio_meta_workers;
+	struct btrfs_workers endio_raid56_workers;
+	struct btrfs_workers rmw_workers;
 	struct btrfs_workers endio_meta_write_workers;
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers endio_freespace_worker;
@@ -3490,9 +3528,9 @@ int btrfs_writepages(struct address_space *mapping,
 		      struct writeback_control *wbc);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *new_root, u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
-			 size_t size, struct bio *bio, unsigned long bio_flags);
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+			 size_t size, struct bio *bio,
+			 unsigned long bio_flags);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
...
@@ -131,6 +131,15 @@ struct btrfs_delayed_ref_root {
 	/* total number of head nodes ready for processing */
 	unsigned long num_heads_ready;
 
+	/*
+	 * bumped when someone is making progress on the delayed
+	 * refs, so that other procs know they are just adding to
+	 * contention instead of helping
+	 */
+	atomic_t procs_running_refs;
+	atomic_t ref_seq;
+	wait_queue_head_t wait;
+
 	/*
 	 * set when the tree is flushing before a transaction commit,
 	 * used by the throttling code to decide if new updates need
...
@@ -46,6 +46,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "raid56.h"
 
 #ifdef CONFIG_X86
 #include <asm/cpufeature.h>
@@ -640,8 +641,15 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 			btree_readahead_hook(root, eb, eb->start, ret);
 	}
 
-	if (ret)
+	if (ret) {
+		/*
+		 * our io error hook is going to dec the io pages
+		 * again, we have to make sure it has something
+		 * to decrement
+		 */
+		atomic_inc(&eb->io_pages);
 		clear_extent_buffer_uptodate(eb);
+	}
 	free_extent_buffer(eb);
 out:
 	return ret;
@@ -655,6 +663,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
 	eb = (struct extent_buffer *)page->private;
 	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
 	eb->read_mirror = failed_mirror;
+	atomic_dec(&eb->io_pages);
 	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 		btree_readahead_hook(root, eb, eb->start, -EIO);
 	return -EIO;	/* we fixed nothing */
@@ -671,17 +680,23 @@ static void end_workqueue_bio(struct bio *bio, int err)
 	end_io_wq->work.flags = 0;
 
 	if (bio->bi_rw & REQ_WRITE) {
-		if (end_io_wq->metadata == 1)
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
 			btrfs_queue_worker(&fs_info->endio_meta_write_workers,
 					   &end_io_wq->work);
-		else if (end_io_wq->metadata == 2)
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
 			btrfs_queue_worker(&fs_info->endio_freespace_worker,
 					   &end_io_wq->work);
+		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+			btrfs_queue_worker(&fs_info->endio_raid56_workers,
+					   &end_io_wq->work);
 		else
 			btrfs_queue_worker(&fs_info->endio_write_workers,
 					   &end_io_wq->work);
 	} else {
-		if (end_io_wq->metadata)
+		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+			btrfs_queue_worker(&fs_info->endio_raid56_workers,
+					   &end_io_wq->work);
+		else if (end_io_wq->metadata)
 			btrfs_queue_worker(&fs_info->endio_meta_workers,
 					   &end_io_wq->work);
 		else
@@ -696,6 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
  * 0 - if data
  * 1 - if normal metadata
  * 2 - if writing to the free space cache area
+ * 3 - raid parity work
  */
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			int metadata)
@@ -2179,6 +2195,12 @@ int open_ctree(struct super_block *sb,
 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
 	init_waitqueue_head(&fs_info->async_submit_wait);
 
+	ret = btrfs_alloc_stripe_hash_table(fs_info);
+	if (ret) {
+		err = -ENOMEM;
+		goto fail_alloc;
+	}
+
 	__setup_root(4096, 4096, 4096, 4096, tree_root,
 		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -2349,6 +2371,12 @@ int open_ctree(struct super_block *sb,
 	btrfs_init_workers(&fs_info->endio_meta_write_workers,
 			   "endio-meta-write", fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->endio_raid56_workers,
+			   "endio-raid56", fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->rmw_workers,
+			   "rmw", fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
@@ -2367,6 +2395,8 @@ int open_ctree(struct super_block *sb,
 	 */
 	fs_info->endio_workers.idle_thresh = 4;
 	fs_info->endio_meta_workers.idle_thresh = 4;
+	fs_info->endio_raid56_workers.idle_thresh = 4;
+	fs_info->rmw_workers.idle_thresh = 2;
 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
@@ -2383,6 +2413,8 @@ int open_ctree(struct super_block *sb,
 	ret |= btrfs_start_workers(&fs_info->fixup_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+	ret |= btrfs_start_workers(&fs_info->rmw_workers);
+	ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
 	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
@@ -2726,6 +2758,8 @@ int open_ctree(struct super_block *sb,
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -2747,6 +2781,7 @@ int open_ctree(struct super_block *sb,
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
+	btrfs_free_stripe_hash_table(fs_info);
 	btrfs_close_devices(fs_info->fs_devices);
 	return err;
@@ -3094,11 +3129,16 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
 				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
 				      == 0)))
 				num_tolerated_disk_barrier_failures = 0;
-			else if (num_tolerated_disk_barrier_failures > 1
-				 &&
-				 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-					   BTRFS_BLOCK_GROUP_RAID10)))
-				num_tolerated_disk_barrier_failures = 1;
+			else if (num_tolerated_disk_barrier_failures > 1) {
+				if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+					     BTRFS_BLOCK_GROUP_RAID5 |
+					     BTRFS_BLOCK_GROUP_RAID10)) {
+					num_tolerated_disk_barrier_failures = 1;
+				} else if (flags &
+					   BTRFS_BLOCK_GROUP_RAID6) {
+					num_tolerated_disk_barrier_failures = 2;
+				}
+			}
 		}
 	}
 	up_read(&sinfo->groups_sem);
@@ -3402,6 +3442,8 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
+	btrfs_stop_workers(&fs_info->endio_raid56_workers);
+	btrfs_stop_workers(&fs_info->rmw_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -3424,6 +3466,8 @@ int close_ctree(struct btrfs_root *root)
 	bdi_destroy(&fs_info->bdi);
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 
+	btrfs_free_stripe_hash_table(fs_info);
+
 	return 0;
 }
...
@@ -25,6 +25,13 @@
 #define BTRFS_SUPER_MIRROR_MAX	 3
 #define BTRFS_SUPER_MIRROR_SHIFT 12
 
+enum {
+	BTRFS_WQ_ENDIO_DATA = 0,
+	BTRFS_WQ_ENDIO_METADATA = 1,
+	BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+	BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
 static inline u64 btrfs_sb_offset(int mirror)
 {
 	u64 start = 16 * 1024;
...
@@ -31,6 +31,7 @@
 #include "print-tree.h"
 #include "transaction.h"
 #include "volumes.h"
+#include "raid56.h"
 #include "locking.h"
 #include "free-space-cache.h"
 #include "math.h"
@@ -1852,6 +1853,8 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 		*actual_bytes = discarded_bytes;
 
+	if (ret == -EOPNOTSUPP)
+		ret = 0;
 	return ret;
 }
@@ -2440,6 +2443,16 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
+static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
+		      int count)
+{
+	int val = atomic_read(&delayed_refs->ref_seq);
+
+	if (val < seq || val >= seq + count)
+		return 1;
+	return 0;
+}
+
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far. count can be
@@ -2474,6 +2487,44 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	delayed_refs = &trans->transaction->delayed_refs;
 	INIT_LIST_HEAD(&cluster);
+	if (count == 0) {
+		count = delayed_refs->num_entries * 2;
+		run_most = 1;
+	}
+
+	if (!run_all && !run_most) {
+		int old;
+		int seq = atomic_read(&delayed_refs->ref_seq);
+
+progress:
+		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+		if (old) {
+			DEFINE_WAIT(__wait);
+			if (delayed_refs->num_entries < 16348)
+				return 0;
+
+			prepare_to_wait(&delayed_refs->wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+
+			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+			if (old) {
+				schedule();
+				finish_wait(&delayed_refs->wait, &__wait);
+
+				if (!refs_newer(delayed_refs, seq, 256))
+					goto progress;
+				else
+					return 0;
+			} else {
+				finish_wait(&delayed_refs->wait, &__wait);
+				goto again;
+			}
+		}
+
+	} else {
+		atomic_inc(&delayed_refs->procs_running_refs);
+	}
+
 again:
 	loops = 0;
 	spin_lock(&delayed_refs->lock);
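The procs_running_refs gate above admits one runner at a time: a second caller either bails out while the backlog is small, or sleeps until ref_seq shows enough progress. A compressed user-space sketch of that compare-and-swap admission idea, using C11 atomics in place of the kernel's atomic_cmpxchg() (names here mirror the diff but the surrounding plumbing is illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int procs_running_refs;

/* Try to become the single runner; returns true if we took the slot.
 * atomic_compare_exchange_strong is the C11 analogue of the kernel's
 * atomic_cmpxchg(&v, 0, 1): it succeeds only if the count was 0. */
static bool try_become_runner(void)
{
	int expected = 0;
	return atomic_compare_exchange_strong(&procs_running_refs,
					      &expected, 1);
}

int main(void)
{
	printf("first:  %d\n", try_become_runner());  /* 1: slot taken */
	printf("second: %d\n", try_become_runner());  /* 0: must wait or bail */
	atomic_fetch_sub(&procs_running_refs, 1);     /* runner exits */
	printf("third:  %d\n", try_become_runner());  /* 1 again */
	return 0;
}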
@@ -2482,10 +2533,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
 
-	if (count == 0) {
-		count = delayed_refs->num_entries * 2;
-		run_most = 1;
-	}
 	while (1) {
 		if (!(run_all || run_most) &&
 		    delayed_refs->num_heads_ready < 64)
@@ -2508,9 +2555,12 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			btrfs_release_ref_cluster(&cluster);
 			spin_unlock(&delayed_refs->lock);
 			btrfs_abort_transaction(trans, root, ret);
+			atomic_dec(&delayed_refs->procs_running_refs);
 			return ret;
 		}
 
+		atomic_add(ret, &delayed_refs->ref_seq);
+
 		count -= min_t(unsigned long, ret, count);
 
 		if (count == 0)
@@ -2579,6 +2629,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		goto again;
 	}
 out:
+	atomic_dec(&delayed_refs->procs_running_refs);
+	smp_mb();
+	if (waitqueue_active(&delayed_refs->wait))
+		wake_up(&delayed_refs->wait);
+
 	spin_unlock(&delayed_refs->lock);
 	assert_qgroups_uptodate(trans);
 	return 0;
@@ -3284,6 +3339,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
 	u64 target;
+	u64 tmp;
 
 	/*
 	 * see if restripe for this chunk_type is in progress, if so
@@ -3300,30 +3356,32 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	}
 	spin_unlock(&root->fs_info->balance_lock);
 
+	/* First, mask out the RAID levels which aren't possible */
 	if (num_devices == 1)
-		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
+		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
+			   BTRFS_BLOCK_GROUP_RAID5);
+	if (num_devices < 3)
+		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
 	if (num_devices < 4)
 		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
 
-	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
-	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-		      BTRFS_BLOCK_GROUP_RAID10))) {
-		flags &= ~BTRFS_BLOCK_GROUP_DUP;
-	}
-
-	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
-	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
-		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
-	}
+	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
+		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
+		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
+	flags &= ~tmp;
 
-	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
-	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
-	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
-	     (flags & BTRFS_BLOCK_GROUP_DUP))) {
-		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
-	}
+	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
+		tmp = BTRFS_BLOCK_GROUP_RAID6;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
+		tmp = BTRFS_BLOCK_GROUP_RAID5;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
+		tmp = BTRFS_BLOCK_GROUP_RAID10;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
+		tmp = BTRFS_BLOCK_GROUP_RAID1;
+	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
+		tmp = BTRFS_BLOCK_GROUP_RAID0;
 
-	return extended_to_chunk(flags);
+	return extended_to_chunk(flags | tmp);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
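The rewritten reduction above replaces the old pairwise elimination with a single priority cascade: mask out the candidate profiles, then keep only the highest-priority one, in the order RAID6 > RAID5 > RAID10 > RAID1 > RAID0. A self-contained sketch of that selection, with the same bit layout as the ctree.h hunk earlier (macro names shortened for the demo):

#include <stdint.h>
#include <stdio.h>

#define RAID0  (1ULL << 3)
#define RAID1  (1ULL << 4)
#define DUP    (1ULL << 5)
#define RAID10 (1ULL << 6)
#define RAID5  (1ULL << 7)
#define RAID6  (1ULL << 8)

/* Reduce a set of candidate profiles to the single preferred one,
 * mirroring the else-if cascade in btrfs_reduce_alloc_profile(). */
static uint64_t pick_profile(uint64_t flags)
{
	static const uint64_t order[] = { RAID6, RAID5, RAID10, RAID1, RAID0 };

	for (unsigned i = 0; i < sizeof(order) / sizeof(order[0]); i++)
		if (flags & order[i])
			return order[i];
	return 0; /* nothing striped or mirrored: "single" */
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)pick_profile(RAID1 | RAID5));
	/* prints 0x80: RAID5 wins over RAID1 */
	return 0;
}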
@@ -3347,6 +3405,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
 	u64 flags;
+	u64 ret;
 
 	if (data)
 		flags = BTRFS_BLOCK_GROUP_DATA;
@@ -3355,7 +3414,8 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 	else
 		flags = BTRFS_BLOCK_GROUP_METADATA;
 
-	return get_alloc_profile(root, flags);
+	ret = get_alloc_profile(root, flags);
+	return ret;
 }
 
 /*
@@ -3530,8 +3590,10 @@ static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
 {
 	u64 num_dev;
 
-	if (type & BTRFS_BLOCK_GROUP_RAID10 ||
-	    type & BTRFS_BLOCK_GROUP_RAID0)
+	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
+		    BTRFS_BLOCK_GROUP_RAID0 |
+		    BTRFS_BLOCK_GROUP_RAID5 |
+		    BTRFS_BLOCK_GROUP_RAID6))
 		num_dev = root->fs_info->fs_devices->rw_devices;
 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
 		num_dev = 2;
@@ -3706,7 +3768,9 @@ static int can_overcommit(struct btrfs_root *root,
 	/*
 	 * If we have dup, raid1 or raid10 then only half of the free
-	 * space is actually useable.
+	 * space is actually useable.  For raid56, the space info used
+	 * doesn't include the parity drive, so we don't have to
+	 * change the math
 	 */
 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
 		       BTRFS_BLOCK_GROUP_RAID1 |
@@ -5539,10 +5603,14 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	return ret;
 }
 
-static u64 stripe_align(struct btrfs_root *root, u64 val)
+static u64 stripe_align(struct btrfs_root *root,
+			struct btrfs_block_group_cache *cache,
+			u64 val, u64 num_bytes)
 {
-	u64 mask = ((u64)root->stripesize - 1);
-	u64 ret = (val + mask) & ~mask;
+	u64 mask;
+	u64 ret;
+
+	mask = ((u64)root->stripesize - 1);
+	ret = (val + mask) & ~mask;
 	return ret;
 }
@@ -5599,8 +5667,12 @@ int __get_raid_index(u64 flags)
 		return BTRFS_RAID_DUP;
 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
 		return BTRFS_RAID_RAID0;
-	else
-		return BTRFS_RAID_SINGLE;
+	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+		return BTRFS_RAID_RAID5;
+	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+		return BTRFS_RAID_RAID6;
+
+	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
 }
 
 static int get_block_group_index(struct btrfs_block_group_cache *cache)
@@ -5743,6 +5815,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		if (!block_group_bits(block_group, data)) {
 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
 				BTRFS_BLOCK_GROUP_RAID1 |
+				BTRFS_BLOCK_GROUP_RAID5 |
+				BTRFS_BLOCK_GROUP_RAID6 |
 				BTRFS_BLOCK_GROUP_RAID10;
 
 			/*
@@ -5771,6 +5845,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		 * lets look there
 		 */
 		if (last_ptr) {
+			unsigned long aligned_cluster;
 			/*
 			 * the refill lock keeps out other
 			 * people trying to start a new cluster
@@ -5837,11 +5912,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 				goto unclustered_alloc;
 			}
 
+			aligned_cluster = max_t(unsigned long,
+						empty_cluster + empty_size,
+						block_group->full_stripe_len);
+
 			/* allocate a cluster in this block group */
 			ret = btrfs_find_space_cluster(trans, root,
 					       block_group, last_ptr,
 					       search_start, num_bytes,
-					       empty_cluster + empty_size);
+					       aligned_cluster);
 			if (ret == 0) {
 				/*
 				 * now pull our allocation out of this
@@ -5912,7 +5991,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 			goto loop;
 		}
 checks:
-		search_start = stripe_align(root, offset);
+		search_start = stripe_align(root, used_block_group,
+					    offset, num_bytes);
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
@@ -7284,6 +7364,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 		root->fs_info->fs_devices->missing_devices;
 
 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
+		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
 
 	if (num_devices == 1) {
@@ -7837,7 +7918,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		btrfs_release_path(path);
 		cache->flags = btrfs_block_group_flags(&cache->item);
 		cache->sectorsize = root->sectorsize;
+		cache->full_stripe_len = btrfs_full_stripe_len(root,
+					       &root->fs_info->mapping_tree,
+					       found_key.objectid);
 		btrfs_init_free_space_ctl(cache);
 
 		/*
@@ -7891,6 +7974,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		if (!(get_alloc_profile(root, space_info->flags) &
 		      (BTRFS_BLOCK_GROUP_RAID10 |
 		       BTRFS_BLOCK_GROUP_RAID1 |
+		       BTRFS_BLOCK_GROUP_RAID5 |
+		       BTRFS_BLOCK_GROUP_RAID6 |
 		       BTRFS_BLOCK_GROUP_DUP)))
 			continue;
 		/*
@@ -7966,6 +8051,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
 	cache->sectorsize = root->sectorsize;
 	cache->fs_info = root->fs_info;
+	cache->full_stripe_len = btrfs_full_stripe_len(root,
+					       &root->fs_info->mapping_tree,
+					       chunk_offset);
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
...
@@ -1895,13 +1895,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
 	if (ret)
 		err = ret;
 
-	if (did_repair) {
-		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
-					rec->start + rec->len - 1,
-					EXTENT_DAMAGED, GFP_NOFS);
-		if (ret && !err)
-			err = ret;
-	}
+	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+				rec->start + rec->len - 1,
+				EXTENT_DAMAGED, GFP_NOFS);
+	if (ret && !err)
+		err = ret;
 
 	kfree(rec);
 	return err;
@@ -1932,10 +1930,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 	u64 map_length = 0;
 	u64 sector;
 	struct btrfs_bio *bbio = NULL;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	int ret;
 
 	BUG_ON(!mirror_num);
 
+	/* we can't repair anything in raid56 yet */
+	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+		return 0;
+
 	bio = bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;
@@ -2052,6 +2055,7 @@ static int clean_io_failure(u64 start, struct page *page)
 					failrec->failed_mirror);
 		did_repair = !ret;
 	}
+	ret = 0;
 }
 
 out:
@@ -2487,13 +2491,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 	return ret;
 }
 
-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
 		     unsigned long offset, size_t size, struct bio *bio,
 		     unsigned long bio_flags)
 {
 	int ret = 0;
 	if (tree->ops && tree->ops->merge_bio_hook)
-		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
 						bio_flags);
 	BUG_ON(ret < 0);
 	return ret;
@@ -2528,7 +2532,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 			sector;
 
 		if (prev_bio_flags != bio_flags || !contig ||
-		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
 		    bio_add_page(bio, page, page_size, offset) < page_size) {
 			ret = submit_one_bio(rw, bio, mirror_num,
 					     prev_bio_flags);
@@ -4162,6 +4166,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
+	int refs;
 	/* the ref bit is tricky. We have to make sure it is set
 	 * if we have the buffer dirty. Otherwise the
 	 * code to free a buffer can end up dropping a dirty
@@ -4182,6 +4187,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * So bump the ref count first, then set the bit. If someone
 	 * beat us to it, drop the ref we added.
 	 */
+	refs = atomic_read(&eb->refs);
+	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+		return;
+
 	spin_lock(&eb->refs_lock);
 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		atomic_inc(&eb->refs);
@@ -4383,9 +4392,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 
 void free_extent_buffer(struct extent_buffer *eb)
 {
+	int refs;
+	int old;
 	if (!eb)
 		return;
 
+	while (1) {
+		refs = atomic_read(&eb->refs);
+		if (refs <= 3)
+			break;
+		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+		if (old == refs)
+			return;
+	}
+
 	spin_lock(&eb->refs_lock);
 	if (atomic_read(&eb->refs) == 2 &&
 	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
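free_extent_buffer() now drops uncontended references with a cmpxchg loop and only takes refs_lock once the count is low enough (<= 3) that the buffer might actually need freeing. A user-space sketch of that lockless fast path with C11 atomics (the slow path is only reported here):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refs = 5;

/* While more than 3 refs remain, try to decrement without a lock;
 * on CAS success we are done.  At a low count, fall back to the
 * locked slow path, which in the kernel decides whether to free. */
static void put_ref(void)
{
	int old = atomic_load(&refs);

	while (old > 3) {
		if (atomic_compare_exchange_weak(&refs, &old, old - 1)) {
			printf("lockless drop, refs now %d\n", old - 1);
			return;
		}
		/* old was reloaded by the failed CAS; retry */
	}
	printf("slow path at refs=%d (would take refs_lock)\n", old);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		put_ref();
	return 0;
}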
...
@@ -72,7 +72,7 @@ struct extent_io_ops {
 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
 	extent_submit_bio_hook_t *submit_bio_hook;
-	int (*merge_bio_hook)(struct page *page, unsigned long offset,
+	int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
 			      size_t size, struct bio *bio,
 			      unsigned long bio_flags);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
...
@@ -1465,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 }
 
 static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+		unsigned long align)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
+	u64 ctl_off;
+	u64 tmp;
+	u64 align_off;
 	int ret;
 
 	if (!ctl->free_space_offset.rb_node)
@@ -1483,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 		if (entry->bytes < *bytes)
 			continue;
 
+		/* make sure the space returned is big enough
+		 * to match our requested alignment
+		 */
+		if (*bytes >= align) {
+			ctl_off = entry->offset - ctl->start;
+			tmp = ctl_off + align - 1;
+			do_div(tmp, align);
+			tmp = tmp * align + ctl->start;
+			align_off = tmp - entry->offset;
+		} else {
+			align_off = 0;
+			tmp = entry->offset;
+		}
+
+		if (entry->bytes < *bytes + align_off)
+			continue;
+
 		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, offset, bytes);
-			if (!ret)
+			ret = search_bitmap(ctl, entry, &tmp, bytes);
+			if (!ret) {
+				*offset = tmp;
 				return entry;
+			}
 			continue;
 		}
 
-		*offset = entry->offset;
-		*bytes = entry->bytes;
+		*offset = tmp;
+		*bytes = entry->bytes - align_off;
 		return entry;
 	}
@@ -2101,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
+	u64 align_gap = 0;
+	u64 align_gap_len = 0;
 
 	spin_lock(&ctl->tree_lock);
-	entry = find_free_space(ctl, &offset, &bytes_search);
+	entry = find_free_space(ctl, &offset, &bytes_search,
+				block_group->full_stripe_len);
 	if (!entry)
 		goto out;
@@ -2113,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		if (!entry->bytes)
 			free_bitmap(ctl, entry);
 	} else {
 		unlink_free_space(ctl, entry);
-		entry->offset += bytes;
-		entry->bytes -= bytes;
+		align_gap_len = offset - entry->offset;
+		align_gap = entry->offset;
+
+		entry->offset = offset + bytes;
+		WARN_ON(entry->bytes < bytes + align_gap_len);
+
+		entry->bytes -= bytes + align_gap_len;
 		if (!entry->bytes)
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 		else
@@ -2125,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 out:
 	spin_unlock(&ctl->tree_lock);
 
+	if (align_gap_len)
+		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
 	return ret;
 }
...
@@ -40,6 +40,7 @@
 #include <linux/ratelimit.h>
 #include <linux/mount.h>
 #include <linux/btrfs.h>
+#include <linux/blkdev.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -1605,7 +1606,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
  * we don't create bios that span stripes or chunks
  */
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
 			 size_t size, struct bio *bio,
 			 unsigned long bio_flags)
 {
@@ -1620,7 +1621,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
 	length = bio->bi_size;
 	map_length = length;
-	ret = btrfs_map_block(root->fs_info, READ, logical,
+	ret = btrfs_map_block(root->fs_info, rw, logical,
 			      &map_length, NULL, 0);
 	/* Will always return 0 with map_multi == NULL */
 	BUG_ON(ret < 0);
@@ -6464,19 +6465,24 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	int async_submit = 0;
 
 	map_length = orig_bio->bi_size;
-	ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
+	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
 			      &map_length, NULL, 0);
 	if (ret) {
 		bio_put(orig_bio);
 		return -EIO;
 	}
+
 	if (map_length >= orig_bio->bi_size) {
 		bio = orig_bio;
 		goto submit;
 	}
 
-	async_submit = 1;
+	/* async crcs make it difficult to collect full stripe writes. */
+	if (btrfs_get_alloc_profile(root, 1) &
+	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+		async_submit = 0;
+	else
+		async_submit = 1;
+
 	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
 	if (!bio)
 		return -ENOMEM;
@@ -6518,7 +6524,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 			bio->bi_end_io = btrfs_end_dio_bio;
 
 			map_length = orig_bio->bi_size;
-			ret = btrfs_map_block(root->fs_info, READ,
+			ret = btrfs_map_block(root->fs_info, rw,
 					      start_sector << 9,
 					      &map_length, NULL, 0);
 			if (ret) {
...
[diff collapsed]
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#ifndef __BTRFS_RAID56__
#define __BTRFS_RAID56__
static inline int nr_parity_stripes(struct map_lookup *map)
{
if (map->type & BTRFS_BLOCK_GROUP_RAID5)
return 1;
else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
return 2;
else
return 0;
}
static inline int nr_data_stripes(struct map_lookup *map)
{
return map->num_stripes - nr_parity_stripes(map);
}
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
((x) == RAID6_Q_STRIPE))
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len, int mirror_num);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct btrfs_bio *bbio, u64 *raid_map,
u64 stripe_len);
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
#endif
@@ -28,6 +28,7 @@
 #include "dev-replace.h"
 #include "check-integrity.h"
 #include "rcu-string.h"
+#include "raid56.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -2254,6 +2255,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	struct btrfs_device *extent_dev;
 	int extent_mirror_num;
 
+	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+			 BTRFS_BLOCK_GROUP_RAID6)) {
+		if (num >= nr_data_stripes(map)) {
+			return 0;
+		}
+	}
+
 	nstripes = length;
 	offset = 0;
 	do_div(nstripes, map->stripe_len);
...
@@ -167,6 +167,9 @@ static noinline int join_transaction(struct btrfs_root *root, int type)
 	spin_lock_init(&cur_trans->commit_lock);
 	spin_lock_init(&cur_trans->delayed_refs.lock);
+	atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
+	atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
+	init_waitqueue_head(&cur_trans->delayed_refs.wait);
 
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	INIT_LIST_HEAD(&cur_trans->ordered_operations);
@@ -637,7 +640,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	if (!list_empty(&trans->new_bgs))
 		btrfs_create_pending_block_groups(trans, root);
 
-	while (count < 2) {
+	while (count < 1) {
 		unsigned long cur = trans->delayed_ref_updates;
 		trans->delayed_ref_updates = 0;
 		if (cur &&
@@ -649,6 +652,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 		}
 		count++;
 	}
+
 	btrfs_trans_release_metadata(trans, root);
 	trans->block_rsv = NULL;
@@ -744,7 +748,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 	struct extent_state *cached_state = NULL;
 	u64 start = 0;
 	u64 end;
+	struct blk_plug plug;
 
+	blk_start_plug(&plug);
 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
 				      mark, &cached_state)) {
 		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
@@ -758,6 +764,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 	}
 	if (err)
 		werr = err;
+	blk_finish_plug(&plug);
 	return werr;
 }
...
[diff collapsed]
@@ -321,7 +321,14 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
 					      struct btrfs_device *tgtdev);
 int btrfs_scratch_superblock(struct btrfs_device *device);
+void btrfs_schedule_bio(struct btrfs_root *root,
+			struct btrfs_device *device,
+			int rw, struct bio *bio);
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+			   u64 logical, u64 len, int mirror_num);
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+				    struct btrfs_mapping_tree *map_tree,
+				    u64 logical);
 
 static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
 				      int index)
 {
...