Commit e91eb620 authored by Linus Torvalds

Merge branch 'for-linus-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs cleanups and fixes from Chris Mason:
 "These are small cleanups, and also some fixes for our async worker
  thread initialization.

  I was having some trouble testing these, but it ended up being a
  combination of changing around my test servers and a shiny new
   "schedule while atomic" warning from the new start/finish_plug in
  writeback_sb_inodes().

   That one only hits on btrfs raid5/6 or MD raid10, and if I hadn't
   been changing a bunch of things in my test setup at once it would
   have been really clear.  A fix for writeback_sb_inodes() is on the
   way as well"

* 'for-linus-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: cleanup: remove unnecessary check before btrfs_free_path is called
  btrfs: async_thread: Fix workqueue 'max_active' value when initializing
  btrfs: Add raid56 support for updating num_tolerated_disk_barrier_failures in btrfs_balance
  btrfs: Cleanup for btrfs_calc_num_tolerated_disk_barrier_failures
  btrfs: Remove unused chunk_tree and chunk_objectid from scrub_enumerate_chunks and scrub_chunk
  btrfs: Update out-of-date "skip parity stripe" comment
parents e013f74b 527afb44
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -42,8 +42,14 @@ struct __btrfs_workqueue {
     /* Thresholding related variants */
     atomic_t pending;
-    int max_active;
-    int current_max;
+
+    /* Up limit of concurrency workers */
+    int limit_active;
+
+    /* Current number of concurrency workers */
+    int current_active;
+
+    /* Threshold to change current_active */
     int thresh;
     unsigned int count;
     spinlock_t thres_lock;
@@ -88,7 +94,7 @@ BTRFS_WORK_HELPER(scrubnc_helper);
 BTRFS_WORK_HELPER(scrubparity_helper);
 
 static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
+__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
              int thresh)
 {
     struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -96,26 +102,31 @@ __btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
     if (!ret)
         return NULL;
 
-    ret->max_active = max_active;
+    ret->limit_active = limit_active;
     atomic_set(&ret->pending, 0);
     if (thresh == 0)
         thresh = DFT_THRESHOLD;
     /* For low threshold, disabling threshold is a better choice */
     if (thresh < DFT_THRESHOLD) {
-        ret->current_max = max_active;
+        ret->current_active = limit_active;
         ret->thresh = NO_THRESHOLD;
     } else {
-        ret->current_max = 1;
+        /*
+         * For threshold-able wq, let its concurrency grow on demand.
+         * Use minimal max_active at alloc time to reduce resource
+         * usage.
+         */
+        ret->current_active = 1;
         ret->thresh = thresh;
     }
 
     if (flags & WQ_HIGHPRI)
         ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
-                         ret->max_active,
-                         "btrfs", name);
+                         ret->current_active, "btrfs",
+                         name);
     else
         ret->normal_wq = alloc_workqueue("%s-%s", flags,
-                         ret->max_active, "btrfs",
+                         ret->current_active, "btrfs",
                          name);
     if (!ret->normal_wq) {
         kfree(ret);
@@ -134,7 +145,7 @@ __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                           unsigned int flags,
-                          int max_active,
+                          int limit_active,
                           int thresh)
 {
     struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -143,14 +154,14 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
         return NULL;
 
     ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
-                          max_active, thresh);
+                          limit_active, thresh);
     if (!ret->normal) {
         kfree(ret);
         return NULL;
     }
 
     if (flags & WQ_HIGHPRI) {
-        ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
+        ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
                             thresh);
         if (!ret->high) {
             __btrfs_destroy_workqueue(ret->normal);
@@ -180,7 +191,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
  */
 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 {
-    int new_max_active;
+    int new_current_active;
     long pending;
     int need_change = 0;
@@ -197,7 +208,7 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
     wq->count %= (wq->thresh / 4);
     if (!wq->count)
         goto out;
-    new_max_active = wq->current_max;
+    new_current_active = wq->current_active;
 
     /*
      * pending may be changed later, but it's OK since we really
@@ -205,19 +216,19 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
      */
     pending = atomic_read(&wq->pending);
     if (pending > wq->thresh)
-        new_max_active++;
+        new_current_active++;
     if (pending < wq->thresh / 2)
-        new_max_active--;
-    new_max_active = clamp_val(new_max_active, 1, wq->max_active);
-    if (new_max_active != wq->current_max) {
+        new_current_active--;
+    new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
+    if (new_current_active != wq->current_active) {
         need_change = 1;
-        wq->current_max = new_max_active;
+        wq->current_active = new_current_active;
     }
 out:
     spin_unlock(&wq->thres_lock);
 
     if (need_change) {
-        workqueue_set_max_active(wq->normal_wq, wq->current_max);
+        workqueue_set_max_active(wq->normal_wq, wq->current_active);
     }
 }
@@ -351,13 +362,13 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
     kfree(wq);
 }
 
-void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
 {
     if (!wq)
         return;
-    wq->normal->max_active = max;
+    wq->normal->limit_active = limit_active;
     if (wq->high)
-        wq->high->max_active = max;
+        wq->high->limit_active = limit_active;
 }
 
 void btrfs_set_work_high_priority(struct btrfs_work *work)
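Editor's note: the rename above is the heart of the workqueue fix. limit_active is the ceiling the caller asked for, while current_active is the value actually applied to the underlying kernel workqueue via workqueue_set_max_active(), growing and shrinking with demand. Below is a minimal userspace sketch of that adjustment policy, assuming nothing beyond the diff; wq_model, adjust() and clamp_int() are illustrative stand-ins, not kernel code.

    /* Userspace model of thresh_exec_hook()'s concurrency adjustment. */
    #include <stdio.h>

    struct wq_model {
        int limit_active;   /* ceiling set by the user */
        int current_active; /* value actually applied to the workqueue */
        int thresh;         /* pending-work threshold */
    };

    static int clamp_int(int v, int lo, int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Returns 1 when the caller would need workqueue_set_max_active(). */
    static int adjust(struct wq_model *wq, long pending)
    {
        int new_active = wq->current_active;

        if (pending > wq->thresh)       /* backlog building up: grow */
            new_active++;
        if (pending < wq->thresh / 2)   /* backlog draining: shrink */
            new_active--;
        new_active = clamp_int(new_active, 1, wq->limit_active);
        if (new_active != wq->current_active) {
            wq->current_active = new_active;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct wq_model wq = { .limit_active = 8, .current_active = 1,
                               .thresh = 64 };
        long samples[] = { 100, 100, 100, 10, 10, 200 };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            adjust(&wq, samples[i]);
            printf("pending=%ld -> current_active=%d\n",
                   samples[i], wq.current_active);
        }
        return 0;
    }

Starting current_active at 1 and letting it climb on demand is why the alloc path now passes current_active, not the ceiling, to alloc_workqueue().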
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -69,7 +69,7 @@ BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                           unsigned int flags,
-                          int max_active,
+                          int limit_active,
                           int thresh);
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
              btrfs_func_t func,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -183,7 +183,6 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
     }
 
 out:
-    if (path)
-        btrfs_free_path(path);
+    btrfs_free_path(path);
     return ret;
 }
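Editor's note: the "if (path)" guards removed here (and in inode.c and tree-defrag.c below) are redundant because btrfs_free_path() already returns early on a NULL pointer, following the same convention as free(NULL). A tiny sketch of the pattern, with free_path_like() as a hypothetical stand-in for the real function (which releases the path and returns it to a kmem cache):

    #include <stdlib.h>

    struct path_like { int dummy; };

    static void free_path_like(struct path_like *p)
    {
        if (!p)     /* early return makes callers' NULL checks unnecessary */
            return;
        free(p);
    }

    int main(void)
    {
        struct path_like *p = malloc(sizeof(*p));

        free_path_like(p);    /* normal case */
        free_path_like(NULL); /* a no-op, no crash: caller needs no guard */
        return 0;
    }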
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3443,6 +3443,26 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
     return 0;
 }
 
+int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
+{
+    if ((flags & (BTRFS_BLOCK_GROUP_DUP |
+              BTRFS_BLOCK_GROUP_RAID0 |
+              BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
+        ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
+        return 0;
+
+    if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+             BTRFS_BLOCK_GROUP_RAID5 |
+             BTRFS_BLOCK_GROUP_RAID10))
+        return 1;
+
+    if (flags & BTRFS_BLOCK_GROUP_RAID6)
+        return 2;
+
+    pr_warn("BTRFS: unknown raid type: %llu\n", flags);
+    return 0;
+}
+
 int btrfs_calc_num_tolerated_disk_barrier_failures(
     struct btrfs_fs_info *fs_info)
 {
@@ -3452,13 +3472,12 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
                BTRFS_BLOCK_GROUP_SYSTEM,
                BTRFS_BLOCK_GROUP_METADATA,
                BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
-    int num_types = 4;
     int i;
     int c;
     int num_tolerated_disk_barrier_failures =
         (int)fs_info->fs_devices->num_devices;
 
-    for (i = 0; i < num_types; i++) {
+    for (i = 0; i < ARRAY_SIZE(types); i++) {
         struct btrfs_space_info *tmp;
 
         sinfo = NULL;
@@ -3476,44 +3495,21 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
         down_read(&sinfo->groups_sem);
         for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
-            if (!list_empty(&sinfo->block_groups[c])) {
-                u64 flags;
-
-                btrfs_get_block_group_info(
-                    &sinfo->block_groups[c], &space);
-                if (space.total_bytes == 0 ||
-                    space.used_bytes == 0)
-                    continue;
-                flags = space.flags;
-                /*
-                 * return
-                 * 0: if dup, single or RAID0 is configured for
-                 *    any of metadata, system or data, else
-                 * 1: if RAID5 is configured, or if RAID1 or
-                 *    RAID10 is configured and only two mirrors
-                 *    are used, else
-                 * 2: if RAID6 is configured, else
-                 * num_mirrors - 1: if RAID1 or RAID10 is
-                 *                  configured and more than
-                 *                  2 mirrors are used.
-                 */
-                if (num_tolerated_disk_barrier_failures > 0 &&
-                    ((flags & (BTRFS_BLOCK_GROUP_DUP |
-                           BTRFS_BLOCK_GROUP_RAID0)) ||
-                     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
-                      == 0)))
-                    num_tolerated_disk_barrier_failures = 0;
-                else if (num_tolerated_disk_barrier_failures > 1) {
-                    if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-                        BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID10)) {
-                        num_tolerated_disk_barrier_failures = 1;
-                    } else if (flags &
-                           BTRFS_BLOCK_GROUP_RAID6) {
-                        num_tolerated_disk_barrier_failures = 2;
-                    }
-                }
-            }
+            u64 flags;
+
+            if (list_empty(&sinfo->block_groups[c]))
+                continue;
+
+            btrfs_get_block_group_info(&sinfo->block_groups[c],
+                           &space);
+            if (space.total_bytes == 0 || space.used_bytes == 0)
+                continue;
+            flags = space.flags;
+
+            num_tolerated_disk_barrier_failures = min(
+                num_tolerated_disk_barrier_failures,
+                btrfs_get_num_tolerated_disk_barrier_failures(
+                    flags));
         }
         up_read(&sinfo->groups_sem);
     }
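Editor's note: the new helper centralizes the profile-to-redundancy mapping that used to be open-coded in the loop: single/DUP/RAID0 tolerate no device loss, RAID1/RAID5/RAID10 tolerate one, RAID6 two. The calc loop then just folds the per-block-group values together with min(). A userspace sketch of that fold, where the P_* bits are mock stand-ins for the BTRFS_BLOCK_GROUP_* flags:

    #include <stdio.h>

    /* Mock profile bits standing in for BTRFS_BLOCK_GROUP_*. */
    enum { P_DUP = 1 << 0, P_RAID0 = 1 << 1, P_RAID1 = 1 << 2,
           P_RAID5 = 1 << 3, P_RAID6 = 1 << 4, P_RAID10 = 1 << 5 };

    static int tolerated(unsigned flags)
    {
        if ((flags & (P_DUP | P_RAID0)) || flags == 0)
            return 0;   /* no redundancy across devices */
        if (flags & (P_RAID1 | P_RAID5 | P_RAID10))
            return 1;   /* survives one device failure */
        if (flags & P_RAID6)
            return 2;   /* survives two device failures */
        return 0;
    }

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        /* Fold per-block-group values as the calc loop does. */
        unsigned groups[] = { P_RAID6, P_RAID1, P_RAID10 };
        int tol = 1000; /* stands in for num_devices as the initial value */

        for (size_t i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
            tol = min_int(tol, tolerated(groups[i]));
        printf("filesystem tolerates %d disk barrier failure(s)\n", tol);
        return 0; /* prints 1: the weakest profile wins */
    }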
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -139,6 +139,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                       u64 objectid);
 int btree_lock_page_hook(struct page *page, void *data,
              void (*flush_fn)(void *));
+int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
 int btrfs_calc_num_tolerated_disk_barrier_failures(
     struct btrfs_fs_info *fs_info);
 int __init btrfs_end_io_wq_init(void);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6909,7 +6909,6 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 
     trace_btrfs_get_extent(root, em);
 
-    if (path)
-        btrfs_free_path(path);
+    btrfs_free_path(path);
     if (trans) {
         ret = btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3267,13 +3267,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
             scrub_blocked_if_needed(fs_info);
         }
 
-        /* for raid56, we skip parity stripe */
         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
             ret = get_raid56_logic_offset(physical, num, map,
                               &logical,
                               &stripe_logical);
             logical += base;
             if (ret) {
+                /* it is parity strip */
                 stripe_logical += base;
                 stripe_end = stripe_logical + increment;
                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
@@ -3480,7 +3480,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
                       struct btrfs_device *scrub_dev,
-                      u64 chunk_tree, u64 chunk_objectid,
                       u64 chunk_offset, u64 length,
                       u64 dev_offset, int is_dev_replace)
 {
@@ -3531,8 +3530,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
     struct btrfs_root *root = sctx->dev_root;
     struct btrfs_fs_info *fs_info = root->fs_info;
     u64 length;
-    u64 chunk_tree;
-    u64 chunk_objectid;
     u64 chunk_offset;
     int ret = 0;
     int slot;
@@ -3596,8 +3593,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
         if (found_key.offset + length <= start)
             goto skip;
 
-        chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
-        chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
         chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 
         /*
@@ -3630,9 +3625,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
         dev_replace->cursor_right = found_key.offset + length;
         dev_replace->cursor_left = found_key.offset;
         dev_replace->item_needs_writeback = 1;
-        ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
-                  chunk_offset, length, found_key.offset,
-                  is_dev_replace);
+        ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
+                  found_key.offset, is_dev_replace);
 
         /*
          * flush, submit all pending read and write bios, afterwards
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -115,7 +115,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
         ret = -EAGAIN;
     }
 out:
-    if (path)
-        btrfs_free_path(path);
+    btrfs_free_path(path);
     if (ret == -EAGAIN) {
         if (root->defrag_max.objectid > root->defrag_progress.objectid)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3585,23 +3585,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
     } while (read_seqretry(&fs_info->profiles_lock, seq));
 
     if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-        int num_tolerated_disk_barrier_failures;
-        u64 target = bctl->sys.target;
-
-        num_tolerated_disk_barrier_failures =
-            btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
-        if (num_tolerated_disk_barrier_failures > 0 &&
-            (target &
-             (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
-              BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
-            num_tolerated_disk_barrier_failures = 0;
-        else if (num_tolerated_disk_barrier_failures > 1 &&
-             (target &
-              (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
-            num_tolerated_disk_barrier_failures = 1;
-
-        fs_info->num_tolerated_disk_barrier_failures =
-            num_tolerated_disk_barrier_failures;
+        fs_info->num_tolerated_disk_barrier_failures = min(
+            btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
+            btrfs_get_num_tolerated_disk_barrier_failures(
+                bctl->sys.target));
     }
 
     ret = insert_balance_item(fs_info->tree_root, bctl);
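Editor's note: with the helper in place, btrfs_balance() no longer re-derives the conversion target's tolerance by hand (and gains RAID5/6 coverage for free); it takes the stricter of the existing layout's value and the target profile's value. A toy composition with illustrative numbers:

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int calc_result = 2;   /* existing block groups, e.g. all RAID6 */
        int target_result = 1; /* converting system chunks to, e.g., RAID5 */

        printf("num_tolerated_disk_barrier_failures = %d\n",
               min_int(calc_result, target_result)); /* prints 1 */
        return 0;
    }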