Commit e91eb620 authored by Linus Torvalds

Merge branch 'for-linus-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs cleanups and fixes from Chris Mason:
 "These are small cleanups, and also some fixes for our async worker
  thread initialization.

  I was having some trouble testing these, but it ended up being a
  combination of changing around my test servers and a shiny new
  "schedule while atomic" warning from the new start/finish_plug in
  writeback_sb_inodes().

  That one only hits on btrfs raid5/6 or MD raid10, and if I hadn't been
  changing a bunch of things in my test setup at once it would have been
  really clear.  A fix for writeback_sb_inodes() is on the way as well"

* 'for-linus-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: cleanup: remove unnecessary check before btrfs_free_path is called
  btrfs: async_thread: Fix workqueue 'max_active' value when initializing
  btrfs: Add raid56 support for updating num_tolerated_disk_barrier_failures in btrfs_balance
  btrfs: Cleanup for btrfs_calc_num_tolerated_disk_barrier_failures
  btrfs: Remove unused chunk_tree and chunk_objectid from scrub_enumerate_chunks and scrub_chunk
  btrfs: Update out-of-date "skip parity stripe" comment
parents e013f74b 527afb44
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -42,8 +42,14 @@ struct __btrfs_workqueue {
 
 	/* Thresholding related variants */
 	atomic_t pending;
-	int max_active;
-	int current_max;
+
+	/* Up limit of concurrency workers */
+	int limit_active;
+
+	/* Current number of concurrency workers */
+	int current_active;
+
+	/* Threshold to change current_active */
 	int thresh;
 	unsigned int count;
 	spinlock_t thres_lock;
@@ -88,7 +94,7 @@ BTRFS_WORK_HELPER(scrubnc_helper);
 BTRFS_WORK_HELPER(scrubparity_helper);
 
 static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
+__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
			 int thresh)
 {
 	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -96,26 +102,31 @@ __btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
 	if (!ret)
 		return NULL;
 
-	ret->max_active = max_active;
+	ret->limit_active = limit_active;
 	atomic_set(&ret->pending, 0);
 	if (thresh == 0)
 		thresh = DFT_THRESHOLD;
 	/* For low threshold, disabling threshold is a better choice */
 	if (thresh < DFT_THRESHOLD) {
-		ret->current_max = max_active;
+		ret->current_active = limit_active;
 		ret->thresh = NO_THRESHOLD;
 	} else {
-		ret->current_max = 1;
+		/*
+		 * For threshold-able wq, let its concurrency grow on demand.
+		 * Use minimal max_active at alloc time to reduce resource
+		 * usage.
+		 */
+		ret->current_active = 1;
 		ret->thresh = thresh;
 	}
 
 	if (flags & WQ_HIGHPRI)
 		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
-						 ret->max_active,
-						 "btrfs", name);
+						 ret->current_active, "btrfs",
+						 name);
 	else
 		ret->normal_wq = alloc_workqueue("%s-%s", flags,
-						 ret->max_active, "btrfs",
+						 ret->current_active, "btrfs",
						 name);
 	if (!ret->normal_wq) {
 		kfree(ret);
@@ -134,7 +145,7 @@ __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
 
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
-					      int max_active,
+					      int limit_active,
					      int thresh)
 {
 	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -143,14 +154,14 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
 		return NULL;
 
 	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
-					      max_active, thresh);
+					      limit_active, thresh);
 	if (!ret->normal) {
 		kfree(ret);
 		return NULL;
 	}
 
 	if (flags & WQ_HIGHPRI) {
-		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
+		ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
						    thresh);
 		if (!ret->high) {
 			__btrfs_destroy_workqueue(ret->normal);
@@ -180,7 +191,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
  */
 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 {
-	int new_max_active;
+	int new_current_active;
 	long pending;
 	int need_change = 0;
 
@@ -197,7 +208,7 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 	wq->count %= (wq->thresh / 4);
 	if (!wq->count)
 		goto out;
-	new_max_active = wq->current_max;
+	new_current_active = wq->current_active;
 
 	/*
	 * pending may be changed later, but it's OK since we really
@@ -205,19 +216,19 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
	 */
 	pending = atomic_read(&wq->pending);
 	if (pending > wq->thresh)
-		new_max_active++;
+		new_current_active++;
 	if (pending < wq->thresh / 2)
-		new_max_active--;
-	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
-	if (new_max_active != wq->current_max) {
+		new_current_active--;
+	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
+	if (new_current_active != wq->current_active) {
 		need_change = 1;
-		wq->current_max = new_max_active;
+		wq->current_active = new_current_active;
 	}
 out:
 	spin_unlock(&wq->thres_lock);
 
 	if (need_change) {
-		workqueue_set_max_active(wq->normal_wq, wq->current_max);
+		workqueue_set_max_active(wq->normal_wq, wq->current_active);
 	}
 }
@@ -351,13 +362,13 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
 	kfree(wq);
 }
 
-void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
 {
 	if (!wq)
 		return;
-	wq->normal->max_active = max;
+	wq->normal->limit_active = limit_active;
 	if (wq->high)
-		wq->high->max_active = max;
+		wq->high->limit_active = limit_active;
 }
 
 void btrfs_set_work_high_priority(struct btrfs_work *work)
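Taken together, the async-thread.c changes make the two counters' roles explicit: limit_active is the ceiling the caller asked for, while current_active is what the underlying kernel workqueue actually runs with, starting at 1 and adjusted by thresh_exec_hook(). Below is a minimal user-space sketch of that adjustment rule only; names mirror the diff, but the spinlock, the count/thresh sampling cadence, and the final workqueue_set_max_active() call are deliberately left out.

#include <stdio.h>

/* User-space stand-in for the kernel's clamp_val(). */
#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

struct wq_sketch {
	int limit_active;	/* upper bound requested by the caller */
	int current_active;	/* concurrency the workqueue runs with now */
	int thresh;		/* pending-work threshold */
};

/* Returns 1 when workqueue_set_max_active() would have to be called. */
static int adjust_active(struct wq_sketch *wq, long pending)
{
	int new_current_active = wq->current_active;

	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);

	if (new_current_active == wq->current_active)
		return 0;
	wq->current_active = new_current_active;
	return 1;
}

int main(void)
{
	struct wq_sketch wq = { .limit_active = 8, .current_active = 1, .thresh = 64 };
	long samples[] = { 100, 100, 100, 10, 10 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int changed = adjust_active(&wq, samples[i]);

		printf("pending=%3ld -> current_active=%d%s\n",
		       samples[i], wq.current_active,
		       changed ? " (apply)" : "");
	}
	return 0;
}

Running it shows the concurrency climbing one worker at a time while pending work stays above the threshold, then dropping back once it falls below thresh / 2, always within [1, limit_active].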
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -69,7 +69,7 @@ BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
 
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
-					      int max_active,
+					      int limit_active,
					      int thresh);
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
		     btrfs_func_t func,
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -183,8 +183,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
 	}
 
 out:
-	if (path)
-		btrfs_free_path(path);
+	btrfs_free_path(path);
 	return ret;
 }
 
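This hunk, and the matching ones in inode.c and tree-defrag.c below, drop the same redundant guard: btrfs_free_path() already returns early on a NULL pointer, the same way kfree() does, so calling it unconditionally is safe. An illustrative user-space analogue of that idiom follows; struct btrfs_path and the helper here are stand-ins, not the kernel API.

#include <stdlib.h>

struct btrfs_path { int dummy; };

/* NULL-safe free, mirroring the behaviour the cleanup relies on. */
static void btrfs_free_path_sketch(struct btrfs_path *p)
{
	if (!p)
		return;
	free(p);
}

int main(void)
{
	struct btrfs_path *path = NULL;	/* e.g. never allocated on an error path */

	/* No "if (path)" guard needed: the helper handles NULL itself. */
	btrfs_free_path_sketch(path);
	return 0;
}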
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3443,6 +3443,26 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 	return 0;
 }
 
+int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
+{
+	if ((flags & (BTRFS_BLOCK_GROUP_DUP |
+		      BTRFS_BLOCK_GROUP_RAID0 |
+		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
+	    ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
+		return 0;
+
+	if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+		     BTRFS_BLOCK_GROUP_RAID5 |
+		     BTRFS_BLOCK_GROUP_RAID10))
+		return 1;
+
+	if (flags & BTRFS_BLOCK_GROUP_RAID6)
+		return 2;
+
+	pr_warn("BTRFS: unknown raid type: %llu\n", flags);
+	return 0;
+}
+
 int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info)
 {
@@ -3452,13 +3472,12 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
		       BTRFS_BLOCK_GROUP_SYSTEM,
		       BTRFS_BLOCK_GROUP_METADATA,
		       BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
-	int num_types = 4;
 	int i;
 	int c;
 	int num_tolerated_disk_barrier_failures =
		(int)fs_info->fs_devices->num_devices;
 
-	for (i = 0; i < num_types; i++) {
+	for (i = 0; i < ARRAY_SIZE(types); i++) {
 		struct btrfs_space_info *tmp;
 
 		sinfo = NULL;
@@ -3476,44 +3495,21 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
 		down_read(&sinfo->groups_sem);
 		for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
-			if (!list_empty(&sinfo->block_groups[c])) {
-				u64 flags;
-
-				btrfs_get_block_group_info(
-					&sinfo->block_groups[c], &space);
-				if (space.total_bytes == 0 ||
-				    space.used_bytes == 0)
-					continue;
-				flags = space.flags;
-				/*
-				 * return
-				 * 0: if dup, single or RAID0 is configured for
-				 *    any of metadata, system or data, else
-				 * 1: if RAID5 is configured, or if RAID1 or
-				 *    RAID10 is configured and only two mirrors
-				 *    are used, else
-				 * 2: if RAID6 is configured, else
-				 * num_mirrors - 1: if RAID1 or RAID10 is
-				 *                  configured and more than
-				 *                  2 mirrors are used.
-				 */
-				if (num_tolerated_disk_barrier_failures > 0 &&
-				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
-					       BTRFS_BLOCK_GROUP_RAID0)) ||
-				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
-				      == 0)))
-					num_tolerated_disk_barrier_failures = 0;
-				else if (num_tolerated_disk_barrier_failures > 1) {
-					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-					    BTRFS_BLOCK_GROUP_RAID5 |
-					    BTRFS_BLOCK_GROUP_RAID10)) {
-						num_tolerated_disk_barrier_failures = 1;
-					} else if (flags &
-						   BTRFS_BLOCK_GROUP_RAID6) {
-						num_tolerated_disk_barrier_failures = 2;
-					}
-				}
-			}
+			u64 flags;
+
+			if (list_empty(&sinfo->block_groups[c]))
+				continue;
+
+			btrfs_get_block_group_info(&sinfo->block_groups[c],
+						   &space);
+			if (space.total_bytes == 0 || space.used_bytes == 0)
+				continue;
+			flags = space.flags;
+
+			num_tolerated_disk_barrier_failures = min(
+				num_tolerated_disk_barrier_failures,
+				btrfs_get_num_tolerated_disk_barrier_failures(
+					flags));
 		}
 		up_read(&sinfo->groups_sem);
 	}
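The new helper gives each profile an explicit tolerance: SINGLE, DUP and RAID0 survive no device loss, RAID1, RAID5 and RAID10 survive one, and RAID6 survives two; btrfs_calc_num_tolerated_disk_barrier_failures() then simply takes the minimum over all occupied block-group types. A stand-alone sketch of the helper's decision table follows; the flag bits are placeholders for illustration, since the real BTRFS_BLOCK_GROUP_* definitions live in the btrfs headers.

#include <stdio.h>
#include <stdint.h>

/* Placeholder flag bits, for illustration only. */
#define BG_DUP		(1ULL << 0)
#define BG_RAID0	(1ULL << 1)
#define BG_RAID1	(1ULL << 2)
#define BG_RAID5	(1ULL << 3)
#define BG_RAID6	(1ULL << 4)
#define BG_RAID10	(1ULL << 5)
#define BG_SINGLE	(1ULL << 6)	/* stand-in for BTRFS_AVAIL_ALLOC_BIT_SINGLE */
#define BG_PROFILE_MASK \
	(BG_DUP | BG_RAID0 | BG_RAID1 | BG_RAID5 | BG_RAID6 | BG_RAID10)

static int tolerated_failures(uint64_t flags)
{
	/* No redundancy across devices: single, dup and raid0. */
	if ((flags & (BG_DUP | BG_RAID0 | BG_SINGLE)) ||
	    (flags & BG_PROFILE_MASK) == 0)
		return 0;
	/* One device may fail: raid1, raid5, raid10. */
	if (flags & (BG_RAID1 | BG_RAID5 | BG_RAID10))
		return 1;
	/* Two devices may fail: raid6. */
	if (flags & BG_RAID6)
		return 2;
	return 0;
}

int main(void)
{
	printf("RAID0:  %d\n", tolerated_failures(BG_RAID0));	/* 0 */
	printf("RAID5:  %d\n", tolerated_failures(BG_RAID5));	/* 1 */
	printf("RAID6:  %d\n", tolerated_failures(BG_RAID6));	/* 2 */
	return 0;
}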
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -139,6 +139,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid);
 int btree_lock_page_hook(struct page *page, void *data,
			 void (*flush_fn)(void *));
+int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
 int btrfs_calc_num_tolerated_disk_barrier_failures(
	struct btrfs_fs_info *fs_info);
 int __init btrfs_end_io_wq_init(void);
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6909,8 +6909,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 
 	trace_btrfs_get_extent(root, em);
 
-	if (path)
-		btrfs_free_path(path);
+	btrfs_free_path(path);
 	if (trans) {
 		ret = btrfs_end_transaction(trans, root);
 		if (!err)
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3267,13 +3267,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
			scrub_blocked_if_needed(fs_info);
 		}
 
-		/* for raid56, we skip parity stripe */
 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
			ret = get_raid56_logic_offset(physical, num, map,
						      &logical,
						      &stripe_logical);
			logical += base;
			if (ret) {
+				/* it is parity strip */
				stripe_logical += base;
				stripe_end = stripe_logical + increment;
				ret = scrub_raid56_parity(sctx, map, scrub_dev,
@@ -3480,7 +3480,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_device *scrub_dev,
-					  u64 chunk_tree, u64 chunk_objectid,
					  u64 chunk_offset, u64 length,
					  u64 dev_offset, int is_dev_replace)
 {
@@ -3531,8 +3530,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 	struct btrfs_root *root = sctx->dev_root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 length;
-	u64 chunk_tree;
-	u64 chunk_objectid;
 	u64 chunk_offset;
 	int ret = 0;
 	int slot;
@@ -3596,8 +3593,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		if (found_key.offset + length <= start)
			goto skip;
 
-		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
-		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 
 		/*
@@ -3630,9 +3625,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		dev_replace->cursor_right = found_key.offset + length;
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
-		ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
-				  chunk_offset, length, found_key.offset,
-				  is_dev_replace);
+		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
+				  found_key.offset, is_dev_replace);
 
 		/*
		 * flush, submit all pending read and write bios, afterwards
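For context on the reworded comment in scrub_stripe() above: get_raid56_logic_offset() reports, via a non-zero return, when the physical stripe being walked maps to RAID5/6 parity rather than data, and those stripes are handed to scrub_raid56_parity() instead of the regular path. A toy sketch of that routing decision follows, with the parity test reduced to a simple rotation stub; the real layout arithmetic in the kernel is considerably more involved.

#include <stdio.h>

/*
 * Stub standing in for get_raid56_logic_offset(): returns non-zero when
 * the given stripe index lands on parity.  Real btrfs rotates parity
 * across devices per stripe; a modulo is enough to illustrate the flow.
 */
static int stripe_is_parity(int stripe_index, int nr_data_stripes)
{
	return stripe_index % (nr_data_stripes + 1) == nr_data_stripes;
}

int main(void)
{
	/* RAID5-like layout: 2 data stripes + 1 parity stripe. */
	for (int i = 0; i < 6; i++) {
		if (stripe_is_parity(i, 2))
			printf("stripe %d: parity -> scrub_raid56_parity()\n", i);
		else
			printf("stripe %d: data   -> regular scrub path\n", i);
	}
	return 0;
}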
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -115,8 +115,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 		ret = -EAGAIN;
 	}
 out:
-	if (path)
-		btrfs_free_path(path);
+	btrfs_free_path(path);
 	if (ret == -EAGAIN) {
 		if (root->defrag_max.objectid > root->defrag_progress.objectid)
			goto done;
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3585,23 +3585,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-		int num_tolerated_disk_barrier_failures;
-		u64 target = bctl->sys.target;
-
-		num_tolerated_disk_barrier_failures =
-			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
-		if (num_tolerated_disk_barrier_failures > 0 &&
-		    (target &
-		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
-		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
-			num_tolerated_disk_barrier_failures = 0;
-		else if (num_tolerated_disk_barrier_failures > 1 &&
-			 (target &
-			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
-			num_tolerated_disk_barrier_failures = 1;
-
-		fs_info->num_tolerated_disk_barrier_failures =
-			num_tolerated_disk_barrier_failures;
+		fs_info->num_tolerated_disk_barrier_failures = min(
+			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info),
+			btrfs_get_num_tolerated_disk_barrier_failures(
+				bctl->sys.target));
 	}
 
 	ret = insert_balance_item(fs_info->tree_root, bctl);
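With both helpers in place, the btrfs_balance() hunk above collapses the old open-coded if/else chain, which ignored RAID5/6 conversion targets, into a single min(): the filesystem-wide value is capped by what the system chunks' conversion target can tolerate. A self-contained sketch of that composition, again with placeholder flag bits:

#include <stdio.h>
#include <stdint.h>

/* Placeholder flag bits, matching the sketch after the disk-io.c hunks. */
#define BG_RAID0 (1ULL << 1)
#define BG_RAID6 (1ULL << 4)

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

/* Simplified stand-in for btrfs_get_num_tolerated_disk_barrier_failures(). */
static int target_tolerated(uint64_t target)
{
	if (target & BG_RAID6)
		return 2;
	if (target & BG_RAID0)
		return 0;
	return 1;	/* other redundant profiles in this toy model */
}

int main(void)
{
	int fs_wide = 1;	/* as if computed over the existing block groups */

	/* Converting system chunks to RAID0 forces the value down to 0... */
	printf("target RAID0: %d\n", min_int(fs_wide, target_tolerated(BG_RAID0)));
	/* ...while a RAID6 target cannot raise it above the fs-wide value. */
	printf("target RAID6: %d\n", min_int(fs_wide, target_tolerated(BG_RAID6)));
	return 0;
}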