Commit cac405a3 authored by Linus Torvalds

Merge tag 'for-6.6-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - delayed refs fixes:
     - fix race when refilling delayed refs block reserve
     - prevent transaction block reserve underflow when starting
       transaction
     - error message and value adjustments

 - fix build warnings with CONFIG_CC_OPTIMIZE_FOR_SIZE and
   -Wmaybe-uninitialized

 - fix for smatch report where uninitialized data from invalid extent
   buffer range could be returned to the caller

 - fix numeric overflow in statfs when calculating lower threshold
   for a full filesystem

* tag 'for-6.6-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: initialize start_slot in btrfs_log_prealloc_extents
  btrfs: make sure to initialize start and len in find_free_dev_extent
  btrfs: reset destination buffer when read_extent_buffer() gets invalid range
  btrfs: properly report 0 avail for very full file systems
  btrfs: log message if extent item not found when running delayed extent op
  btrfs: remove redundant BUG_ON() from __btrfs_inc_extent_ref()
  btrfs: return -EUCLEAN for delayed tree ref with a ref count not equals to 1
  btrfs: prevent transaction block reserve underflow when starting transaction
  btrfs: fix race when refilling delayed refs block reserve
parents 50768a42 b4c639f6
...@@ -103,24 +103,17 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans) ...@@ -103,24 +103,17 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
* Transfer bytes to our delayed refs rsv. * Transfer bytes to our delayed refs rsv.
* *
* @fs_info: the filesystem * @fs_info: the filesystem
* @src: source block rsv to transfer from
* @num_bytes: number of bytes to transfer * @num_bytes: number of bytes to transfer
* *
* This transfers up to the num_bytes amount from the src rsv to the * This transfers up to the num_bytes amount, previously reserved, to the
* delayed_refs_rsv. Any extra bytes are returned to the space info. * delayed_refs_rsv. Any extra bytes are returned to the space info.
*/ */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *src,
u64 num_bytes) u64 num_bytes)
{ {
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
u64 to_free = 0; u64 to_free = 0;
spin_lock(&src->lock);
src->reserved -= num_bytes;
src->size -= num_bytes;
spin_unlock(&src->lock);
spin_lock(&delayed_refs_rsv->lock); spin_lock(&delayed_refs_rsv->lock);
if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) { if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
u64 delta = delayed_refs_rsv->size - u64 delta = delayed_refs_rsv->size -
...@@ -163,6 +156,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, ...@@ -163,6 +156,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv; struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1); u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0; u64 num_bytes = 0;
u64 refilled_bytes;
u64 to_free;
int ret = -ENOSPC; int ret = -ENOSPC;
spin_lock(&block_rsv->lock); spin_lock(&block_rsv->lock);
...@@ -178,9 +173,38 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, ...@@ -178,9 +173,38 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush); ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
if (ret) if (ret)
return ret; return ret;
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", /*
0, num_bytes, 1); * We may have raced with someone else, so check again if we the block
* reserve is still not full and release any excess space.
*/
spin_lock(&block_rsv->lock);
if (block_rsv->reserved < block_rsv->size) {
u64 needed = block_rsv->size - block_rsv->reserved;
if (num_bytes >= needed) {
block_rsv->reserved += needed;
block_rsv->full = true;
to_free = num_bytes - needed;
refilled_bytes = needed;
} else {
block_rsv->reserved += num_bytes;
to_free = 0;
refilled_bytes = num_bytes;
}
} else {
to_free = num_bytes;
refilled_bytes = 0;
}
spin_unlock(&block_rsv->lock);
if (to_free > 0)
btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
to_free);
if (refilled_bytes > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
refilled_bytes, 1);
return 0; return 0;
} }
......
...@@ -407,7 +407,6 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans); ...@@ -407,7 +407,6 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush); enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *src,
u64 num_bytes); u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
......
...@@ -1514,15 +1514,14 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, ...@@ -1514,15 +1514,14 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
btrfs_release_path(path); btrfs_release_path(path);
/* now insert the actual backref */ /* now insert the actual backref */
if (owner < BTRFS_FIRST_FREE_OBJECTID) { if (owner < BTRFS_FIRST_FREE_OBJECTID)
BUG_ON(refs_to_add != 1);
ret = insert_tree_block_ref(trans, path, bytenr, parent, ret = insert_tree_block_ref(trans, path, bytenr, parent,
root_objectid); root_objectid);
} else { else
ret = insert_extent_data_ref(trans, path, bytenr, parent, ret = insert_extent_data_ref(trans, path, bytenr, parent,
root_objectid, owner, offset, root_objectid, owner, offset,
refs_to_add); refs_to_add);
}
if (ret) if (ret)
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
out: out:
...@@ -1656,7 +1655,10 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans, ...@@ -1656,7 +1655,10 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
goto again; goto again;
} }
} else { } else {
err = -EIO; err = -EUCLEAN;
btrfs_err(fs_info,
"missing extent item for extent %llu num_bytes %llu level %d",
head->bytenr, head->num_bytes, extent_op->level);
goto out; goto out;
} }
} }
...@@ -1699,12 +1701,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, ...@@ -1699,12 +1701,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
parent = ref->parent; parent = ref->parent;
ref_root = ref->root; ref_root = ref->root;
if (node->ref_mod != 1) { if (unlikely(node->ref_mod != 1)) {
btrfs_err(trans->fs_info, btrfs_err(trans->fs_info,
"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu", "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root, node->bytenr, node->ref_mod, node->action, ref_root,
parent); parent);
return -EIO; return -EUCLEAN;
} }
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags); BUG_ON(!extent_op || !extent_op->update_flags);
......
...@@ -3995,8 +3995,14 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, ...@@ -3995,8 +3995,14 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
char *dst = (char *)dstv; char *dst = (char *)dstv;
unsigned long i = get_eb_page_index(start); unsigned long i = get_eb_page_index(start);
if (check_eb_range(eb, start, len)) if (check_eb_range(eb, start, len)) {
/*
* Invalid range hit, reset the memory, so callers won't get
* some random garbage for their uninitialzed memory.
*/
memset(dstv, 0, len);
return; return;
}
offset = get_eb_offset_in_page(eb, start); offset = get_eb_offset_in_page(eb, start);
......
...@@ -2117,7 +2117,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) ...@@ -2117,7 +2117,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* calculated f_bavail. * calculated f_bavail.
*/ */
if (!mixed && block_rsv->space_info->full && if (!mixed && block_rsv->space_info->full &&
total_free_meta - thresh < block_rsv->size) (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0; buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC; buf->f_type = BTRFS_SUPER_MAGIC;
......
...@@ -631,14 +631,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items, ...@@ -631,14 +631,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
reloc_reserved = true; reloc_reserved = true;
} }
ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush); ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
if (ret) if (ret)
goto reserve_fail; goto reserve_fail;
if (delayed_refs_bytes) { if (delayed_refs_bytes) {
btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv, btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
delayed_refs_bytes);
num_bytes -= delayed_refs_bytes; num_bytes -= delayed_refs_bytes;
} }
btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
if (rsv->space_info->force_alloc) if (rsv->space_info->force_alloc)
do_chunk_alloc = true; do_chunk_alloc = true;
......
...@@ -4722,7 +4722,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, ...@@ -4722,7 +4722,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf; struct extent_buffer *leaf;
int slot; int slot;
int ins_nr = 0; int ins_nr = 0;
int start_slot; int start_slot = 0;
int ret; int ret;
if (!(inode->flags & BTRFS_INODE_PREALLOC)) if (!(inode->flags & BTRFS_INODE_PREALLOC))
......
...@@ -1594,7 +1594,7 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, ...@@ -1594,7 +1594,7 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 search_start; u64 search_start;
u64 hole_size; u64 hole_size;
u64 max_hole_start; u64 max_hole_start;
u64 max_hole_size; u64 max_hole_size = 0;
u64 extent_end; u64 extent_end;
u64 search_end = device->total_bytes; u64 search_end = device->total_bytes;
int ret; int ret;
...@@ -1602,17 +1602,16 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, ...@@ -1602,17 +1602,16 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
struct extent_buffer *l; struct extent_buffer *l;
search_start = dev_extent_search_start(device); search_start = dev_extent_search_start(device);
max_hole_start = search_start;
WARN_ON(device->zone_info && WARN_ON(device->zone_info &&
!IS_ALIGNED(num_bytes, device->zone_info->zone_size)); !IS_ALIGNED(num_bytes, device->zone_info->zone_size));
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path) {
return -ENOMEM; ret = -ENOMEM;
goto out;
max_hole_start = search_start; }
max_hole_size = 0;
again: again:
if (search_start >= search_end || if (search_start >= search_end ||
test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment