Commit 9791581c authored by Linus Torvalds

Merge tag 'for-5.11-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A few more one line fixes for various bugs, stable material.

   - fix send when emitting clone operation from the same file and root

   - fix double free on error when cleaning backrefs

   - lockdep fix during relocation

   - handle potential error during reloc when starting transaction

   - skip running delayed refs during commit (leftover from code removal
     in this dev cycle)"

* tag 'for-5.11-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: don't clear ret in btrfs_start_dirty_block_groups
  btrfs: fix lockdep splat in btrfs_recover_relocation
  btrfs: do not double free backref nodes on error
  btrfs: don't get an EINTR during drop_snapshot for reloc
  btrfs: send: fix invalid clone operations when cloning from the same file and root
  btrfs: no need to run delayed refs after commit_fs_roots during commit
parents 75439bc4 34d1eb0e
@@ -3117,7 +3117,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
 		list_del_init(&lower->list);
 		if (lower == node)
 			node = NULL;
-		btrfs_backref_free_node(cache, lower);
+		btrfs_backref_drop_node(cache, lower);
 	}
 
 	btrfs_backref_cleanup_node(cache, node);
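For context on the backref fix above: btrfs_backref_free_node() frees a node without detaching it from the cache, so the cleanup that follows can reach the same node again, while btrfs_backref_drop_node() unlinks it first. The standalone C sketch below shows that unlink-before-free rule; the node_cache/cnode types and helpers are invented for illustration and are not the btrfs structures.

/*
 * Unlink-before-free sketch. With "buggy" set, the node is freed while
 * the cache still points at it, and the final cleanup frees it again.
 */
#include <stdio.h>
#include <stdlib.h>

struct cnode {
	struct cnode *next;
};

struct node_cache {
	struct cnode *head;	/* cached nodes, singly linked */
};

/* analogue of btrfs_backref_free_node(): free without unlinking */
static void free_node(struct cnode *n)
{
	free(n);
}

/* analogue of btrfs_backref_drop_node(): unlink from the cache, then free */
static void drop_node(struct node_cache *cache, struct cnode *n)
{
	struct cnode **pp = &cache->head;

	while (*pp && *pp != n)
		pp = &(*pp)->next;
	if (*pp)
		*pp = n->next;	/* cleanup below can no longer see n */
	free(n);
}

/* analogue of the final cleanup pass: frees whatever is still linked */
static void cleanup_cache(struct node_cache *cache)
{
	while (cache->head) {
		struct cnode *n = cache->head;

		cache->head = n->next;
		free(n);	/* double free if free_node() was used */
	}
}

int main(void)
{
	struct node_cache cache = { NULL };
	struct cnode *n = calloc(1, sizeof(*n));
	int buggy = 0;		/* flip to 1 to reproduce the double free */

	if (!n)
		return 1;
	n->next = cache.head;
	cache.head = n;

	if (buggy)
		free_node(n);		/* cache.head still points at n */
	else
		drop_node(&cache, n);	/* what the fix switches to */
	cleanup_cache(&cache);
	printf("cache drained\n");
	return 0;
}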
@@ -2669,6 +2669,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
 	 * Go through delayed refs for all the stuff we've just kicked off
 	 * and then loop back (just once)
 	 */
-	ret = btrfs_run_delayed_refs(trans, 0);
+	if (!ret)
+		ret = btrfs_run_delayed_refs(trans, 0);
 	if (!ret && loops == 0) {
 		loops++;
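The one-line guard above exists because ret can already hold an error from the writeout kicked off earlier in the loop; assigning the result of btrfs_run_delayed_refs() unconditionally would discard that error and the caller would see success. A minimal sketch of the pattern, with invented step_a()/step_b() helpers standing in for the real calls:

/*
 * Error-clobbering sketch: without the guard, step_b()'s success
 * overwrites the failure step_a() already stored in ret.
 */
#include <stdio.h>

static int step_a(void) { return -5; }	/* pretend -EIO from earlier work */
static int step_b(void) { return 0; }	/* succeeds, would mask the error */

int main(void)
{
	int ret;

	ret = step_a();
	if (!ret)		/* the added guard: keep the first error */
		ret = step_b();
	printf("ret = %d\n", ret);	/* prints -5, not 0 */
	return 0;
}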
@@ -5549,6 +5549,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
 		goto out_free;
 	}
 
-	trans = btrfs_start_transaction(tree_root, 0);
+	/*
+	 * Use join to avoid potential EINTR from transaction
+	 * start. See wait_reserve_ticket and the whole
+	 * reservation callchain.
+	 */
+	if (for_reloc)
+		trans = btrfs_join_transaction(tree_root);
+	else
+		trans = btrfs_start_transaction(tree_root, 0);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
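Background for this hunk, per the new comment: starting a transaction may reserve metadata space and sleep in the reservation ticket wait, and that sleep can be interrupted by a signal, surfacing as -EINTR; joining the already-running transaction skips the reservation. The sketch below models that distinction; the types and helpers are stand-ins, not the btrfs transaction API.

/*
 * Start-vs-join sketch. "start" models a call whose space-reservation
 * wait is interruptible and can fail with EINTR; "join" models attaching
 * to the running transaction with no reservation and no EINTR window.
 */
#include <errno.h>
#include <stdio.h>

struct txn { int users; };

static struct txn running;

static struct txn *start_transaction(int signal_pending)
{
	if (signal_pending) {	/* reservation wait aborted by a signal */
		errno = EINTR;
		return NULL;
	}
	running.users++;
	return &running;
}

static struct txn *join_transaction(void)
{
	running.users++;	/* no reservation, cannot hit EINTR */
	return &running;
}

int main(void)
{
	int for_reloc = 1;
	struct txn *trans;

	/* same shape as the hunk: relocation takes the join path */
	if (for_reloc)
		trans = join_transaction();
	else
		trans = start_transaction(1 /* pretend a signal arrived */);

	printf(trans ? "got transaction\n" : "start failed: EINTR\n");
	return 0;
}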
@@ -5512,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
 			break;
 		offset += clone_len;
 		clone_root->offset += clone_len;
+
+		/*
+		 * If we are cloning from the file we are currently processing,
+		 * and using the send root as the clone root, we must stop once
+		 * the current clone offset reaches the current eof of the file
+		 * at the receiver, otherwise we would issue an invalid clone
+		 * operation (source range going beyond eof) and cause the
+		 * receiver to fail. So if we reach the current eof, bail out
+		 * and fallback to a regular write.
+		 */
+		if (clone_root->root == sctx->send_root &&
+		    clone_root->ino == sctx->cur_ino &&
+		    clone_root->offset >= sctx->cur_inode_next_write_offset)
+			break;
+
 		data_offset += clone_len;
 next:
 		path->slots[0]++;
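The added check keeps the stream valid on the receiver, which replays operations in order: a clone sourced from the file currently being sent may only reference data below that file's current eof on the receiving side. A small arithmetic sketch of the bound, using made-up offsets and lengths rather than the send code's state:

/*
 * Eof-bound sketch with invented numbers: once the clone source offset
 * reaches what the receiver has written so far, stop cloning and fall
 * back to a regular write, mirroring the break in the hunk above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long receiver_eof = 8192;	/* bytes written so far */
	unsigned long long offset = 0;
	const unsigned long long clone_len = 4096;
	const unsigned long long file_size = 16384;

	while (offset + clone_len <= file_size) {
		printf("clone source range [%llu, %llu)\n",
		       offset, offset + clone_len);
		offset += clone_len;
		if (offset >= receiver_eof) {
			printf("reached receiver eof at %llu, "
			       "fall back to regular writes\n", offset);
			break;
		}
	}
	return 0;
}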
@@ -2264,14 +2264,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 	 */
 	btrfs_free_log_root_tree(trans, fs_info);
 
-	/*
-	 * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
-	 * new delayed refs. Must handle them or qgroup can be wrong.
-	 */
-	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-	if (ret)
-		goto unlock_tree_log;
-
 	/*
 	 * Since fs roots are all committed, we can get a quite accurate
 	 * new_roots. So let's do quota accounting.
@@ -4317,6 +4317,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
 		btrfs_warn(fs_info,
 	"balance: cannot set exclusive op status, resume manually");
 
+	btrfs_release_path(path);
+
 	mutex_lock(&fs_info->balance_mutex);
 	BUG_ON(fs_info->balance_ctl);
 	spin_lock(&fs_info->balance_lock);
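The added btrfs_release_path() drops the tree locks the path still holds before balance_mutex is taken, so the two are never nested in the order lockdep complained about. A minimal pthread sketch of that release-before-lock ordering; the mutex names are invented:

/*
 * Release-before-lock sketch: drop the lock guarding the just-read data
 * (standing in for the path's tree locks) before taking the long-held
 * mutex (standing in for balance_mutex), keeping lock order consistent.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t balance_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&tree_lock);		/* held while reading the item */
	/* ... read the balance item ... */
	pthread_mutex_unlock(&tree_lock);	/* the added release step */

	pthread_mutex_lock(&balance_lock);	/* taken with nothing nested */
	printf("balance state restored\n");
	pthread_mutex_unlock(&balance_lock);
	return 0;
}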