Commit e96f8f18 authored by Linus Torvalds

Merge branch 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "These are all over the place.

  The tracepoint part of the pull fixes a crash and adds a little more
  information to two tracepoints, while the rest are good old-fashioned
  fixes"

* 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: make tracepoint format strings more compact
  Btrfs: add truncated_len for ordered extent tracepoints
  Btrfs: add 'inode' for extent map tracepoint
  btrfs: fix crash when tracepoint arguments are freed by wq callbacks
  Btrfs: adjust outstanding_extents counter properly when dio write is split
  Btrfs: fix lockdep warning about log_mutex
  Btrfs: use down_read_nested to make lockdep silent
  btrfs: fix locking when we put back a delayed ref that's too new
  btrfs: fix error handling when run_delayed_extent_op fails
  btrfs: return the actual error value from btrfs_uuid_tree_iterate
parents 04e39627 0bf70aeb
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 	unsigned long flags;
 
 	while (1) {
+		void *wtag;
+
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		spin_unlock_irqrestore(lock, flags);
 
 		/*
-		 * we don't want to call the ordered free functions
-		 * with the lock held though
+		 * We don't want to call the ordered free functions with the
+		 * lock held though. Save the work as tag for the trace event,
+		 * because the callback could free the structure.
 		 */
+		wtag = work;
 		work->ordered_free(work);
-		trace_btrfs_all_work_done(work);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
 	}
 	spin_unlock_irqrestore(lock, flags);
 }
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 static void normal_work_helper(struct btrfs_work *work)
 {
 	struct __btrfs_workqueue *wq;
+	void *wtag;
 	int need_order = 0;
 
 	/*
......@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
/* Safe for tracepoints in case work gets freed by the callback */
wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
 		run_ordered_work(wq);
 	}
 	if (!need_order)
-		trace_btrfs_all_work_done(work);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
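The crash fix above amounts to one rule: capture the value of the work pointer before the ordered_free() callback can release the object, and hand only that saved value to the tracepoint as an opaque tag, never dereferencing it afterwards. A minimal userspace sketch of the pattern (all names here are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct work {
        void (*ordered_free)(struct work *);
    };

    static void free_work(struct work *w)
    {
        free(w);                /* 'w' must not be dereferenced after this */
    }

    static void run_one(struct work *w)
    {
        void *wtag = w;         /* save the raw pointer value first */

        w->ordered_free(w);     /* may free 'w' */
        /* log only the saved value; never touch *w here */
        printf("all work done: tag=%p\n", wtag);
    }

    int main(void)
    {
        struct work *w = malloc(sizeof(*w));

        w->ordered_free = free_work;
        run_one(w);
        return 0;
    }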
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		if (ref && ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			spin_unlock(&locked_ref->lock);
-			btrfs_delayed_ref_unlock(locked_ref);
 			spin_lock(&delayed_refs->lock);
 			locked_ref->processing = 0;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			cond_resched();
 			count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 				 */
 				if (must_insert_reserved)
 					locked_ref->must_insert_reserved = 1;
+				spin_lock(&delayed_refs->lock);
 				locked_ref->processing = 0;
+				delayed_refs->num_heads_ready++;
+				spin_unlock(&delayed_refs->lock);
 				btrfs_debug(fs_info,
 					    "run_delayed_extent_op returned %d",
 					    ret);
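Both delayed-ref hunks enforce the same ordering: the head's bookkeeping (processing, num_heads_ready) is updated under delayed_refs->lock *before* the head is unlocked and handed back, so another worker cannot grab it and observe stale counters. A compilable userspace analogue of that ordering (pthread names and structures are hypothetical stand-ins):

    #include <pthread.h>
    #include <stdio.h>

    struct queue {
        pthread_mutex_t lock;
        int num_ready;
    };

    struct item {
        pthread_mutex_t lock;   /* held while the item is being processed */
        int processing;
    };

    /* Put an item back: fix shared counters *before* releasing the item. */
    static void put_back(struct queue *q, struct item *it)
    {
        pthread_mutex_lock(&q->lock);
        it->processing = 0;
        q->num_ready++;
        pthread_mutex_unlock(&q->lock);
        /* only now may another thread pick the item up */
        pthread_mutex_unlock(&it->lock);
    }

    int main(void)
    {
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct item it = { PTHREAD_MUTEX_INITIALIZER, 1 };

        pthread_mutex_lock(&it.lock);
        put_back(&q, &it);
        printf("num_ready=%d processing=%d\n", q.num_ready, it.processing);
        return 0;
    }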
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
 			spin_unlock(&cluster->refill_lock);
 
-			down_read(&used_bg->data_rwsem);
+			/* We should only have one-level nested. */
+			down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
 
 			spin_lock(&cluster->refill_lock);
 			if (used_bg == cluster->block_group)
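down_read_nested() exists for exactly this situation: a second rwsem of the same lockdep class is taken while the first is held, and the annotation tells lockdep that one level of nesting is intentional rather than a potential deadlock. A kernel-style fragment showing the shape of the call (the function and its parameters are illustrative, not from btrfs):

    #include <linux/rwsem.h>
    #include <linux/lockdep.h>

    /* Take two rwsems of the same class; annotate the inner acquisition. */
    static void lock_two_groups(struct rw_semaphore *a, struct rw_semaphore *b)
    {
        down_read(a);
        down_read_nested(b, SINGLE_DEPTH_NESTING);
        /* ... use both ... */
        up_read(b);
        up_read(a);
    }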
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7059,7 +7059,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 	write_unlock(&em_tree->lock);
 out:
 
-	trace_btrfs_get_extent(root, em);
+	trace_btrfs_get_extent(root, inode, em);
 
 	btrfs_free_path(path);
 	if (trans) {
@@ -7623,11 +7623,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
 	 * within our reservation, otherwise we need to adjust our inode
 	 * counter appropriately.
 	 */
-	if (dio_data->outstanding_extents) {
+	if (dio_data->outstanding_extents >= num_extents) {
 		dio_data->outstanding_extents -= num_extents;
 	} else {
+		/*
+		 * If dio write length has been split due to no large enough
+		 * contiguous space, we need to compensate our inode counter
+		 * appropriately.
+		 */
+		u64 num_needed = num_extents - dio_data->outstanding_extents;
+
 		spin_lock(&BTRFS_I(inode)->lock);
-		BTRFS_I(inode)->outstanding_extents += num_extents;
+		BTRFS_I(inode)->outstanding_extents += num_needed;
 		spin_unlock(&BTRFS_I(inode)->lock);
 	}
 }
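The arithmetic is the whole fix: if a write reserved space for one extent but ended up split into three, the inode-wide counter must grow by the shortfall (two), not by the full split count (three), or the already-reserved extent gets counted twice. A standalone sketch of just that calculation (plain C, hypothetical values):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long outstanding = 1;  /* reserved: one extent */
        unsigned long long num_extents = 3;  /* write was split in three */
        unsigned long long inode_counter = 1;

        if (outstanding >= num_extents) {
            outstanding -= num_extents;
        } else {
            /* compensate only for the extents not covered */
            unsigned long long num_needed = num_extents - outstanding;

            inode_counter += num_needed;     /* 1 + 2, not 1 + 3 */
        }
        printf("inode counter now %llu\n", inode_counter);
        return 0;
    }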
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -37,6 +37,7 @@
  */
 #define LOG_INODE_ALL 0
 #define LOG_INODE_EXISTS 1
+#define LOG_OTHER_INODE 2
 
 /*
  * directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	if (S_ISDIR(inode->i_mode) ||
 	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 		       &BTRFS_I(inode)->runtime_flags) &&
-	     inode_only == LOG_INODE_EXISTS))
+	     inode_only >= LOG_INODE_EXISTS))
 		max_key.type = BTRFS_XATTR_ITEM_KEY;
 	else
 		max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	mutex_lock(&BTRFS_I(inode)->log_mutex);
+	if (inode_only == LOG_OTHER_INODE) {
+		inode_only = LOG_INODE_EXISTS;
+		mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
+				  SINGLE_DEPTH_NESTING);
+	} else {
+		mutex_lock(&BTRFS_I(inode)->log_mutex);
+	}
 
 	/*
 	 * a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 				 * unpin it.
 				 */
 				err = btrfs_log_inode(trans, root, other_inode,
-						      LOG_INODE_EXISTS,
+						      LOG_OTHER_INODE,
 						      0, LLONG_MAX, ctx);
 				iput(other_inode);
 				if (err)
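LOG_OTHER_INODE is purely a signal to the locking code: when btrfs_log_inode() re-enters for a second inode while the first inode's log_mutex is held, the inner lock of the same class is taken with mutex_lock_nested() so lockdep accepts the single level of nesting, and the mode then degrades to LOG_INODE_EXISTS. A kernel-style fragment of that dispatch (the helper and its parameters are illustrative):

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    #define LOG_INODE_EXISTS 1
    #define LOG_OTHER_INODE  2

    static void lock_log_mutex(struct mutex *log_mutex, int *inode_only)
    {
        if (*inode_only == LOG_OTHER_INODE) {
            /* second lock of the same class; annotate the nesting */
            *inode_only = LOG_INODE_EXISTS;
            mutex_lock_nested(log_mutex, SINGLE_DEPTH_NESTING);
        } else {
            mutex_lock(log_mutex);
        }
    }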
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -352,7 +352,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
 
 out:
 	btrfs_free_path(path);
-	if (ret)
-		btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
-	return 0;
+	return ret;
 }
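The uuid-tree change is plain error propagation: the old code logged the failure and returned 0, so callers treated a failed iteration as success. A minimal sketch of the before/after behaviour (do_iterate is a stand-in, not a btrfs function):

    #include <errno.h>
    #include <stdio.h>

    static int do_iterate(void)
    {
        return -ENOMEM;   /* pretend the tree walk failed */
    }

    static int iterate_tree(void)
    {
        int ret = do_iterate();

        /* before: if (ret) log(...); return 0;  -- caller saw success */
        return ret;       /* after: the caller can react to the failure */
    }

    int main(void)
    {
        printf("iterate_tree() = %d\n", iterate_tree());
        return 0;
    }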