Commit b6c1f1ec authored by Linus Torvalds

Merge tag 'for-6.8-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A  more fixes for recently reported or discovered problems:

   - fix corner case of send that would generate potentially large
     stream of zeros if there's a hole at the end of the file

   - fix chunk validation in zoned mode on conventional zones, it was
     possible to create chunks that would not be allowed on sequential
     zones

   - fix validation of dev-replace ioctl filenames

   - fix KCSAN warnings about access to block reserve struct members"

* tag 'for-6.8-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix data race at btrfs_use_block_rsv() when accessing block reserve
  btrfs: fix data races when accessing the reserved amount of block reserves
  btrfs: send: don't issue unnecessary zero writes for trailing hole
  btrfs: dev-replace: properly validate device names
  btrfs: zoned: don't skip block group profile checks on conventional zones
parents c8e31462 c7bb26b8
@@ -494,7 +494,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
 	block_rsv = get_block_rsv(trans, root);
 
-	if (unlikely(block_rsv->size == 0))
+	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
 		goto try_reserve;
 again:
 	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
 	return data_race(rsv->full);
 }
 
+/*
+ * Get the reserved amount of a block reserve in a context where getting a
+ * stale value is acceptable, instead of accessing it directly and triggering
+ * a data race warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
+{
+	u64 ret;
+
+	spin_lock(&rsv->lock);
+	ret = rsv->reserved;
+	spin_unlock(&rsv->lock);
+
+	return ret;
+}
+
+/*
+ * Get the size of a block reserve in a context where getting a stale value
+ * is acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+	u64 ret;
+
+	spin_lock(&rsv->lock);
+	ret = rsv->size;
+	spin_unlock(&rsv->lock);
+
+	return ret;
+}
+
 #endif /* BTRFS_BLOCK_RSV_H */
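For context, these helpers are for readers that can tolerate a stale value. A minimal usage sketch (the caller rsv_unused_bytes is hypothetical, not part of this commit):

/* Hypothetical caller, for illustration only: takes locked snapshots via
 * the new helpers instead of dereferencing rsv->size / rsv->reserved,
 * which KCSAN would flag as a data race. */
static u64 rsv_unused_bytes(struct btrfs_block_rsv *rsv)
{
	u64 size = btrfs_block_rsv_size(rsv);
	u64 reserved = btrfs_block_rsv_reserved(rsv);

	/* Either value may already be stale, but each read was consistent. */
	return size > reserved ? size - reserved : 0;
}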
@@ -725,6 +725,23 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 	return ret;
 }
 
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+	if (args->start.srcdevid == 0) {
+		if (memchr(args->start.srcdev_name, 0,
+			   sizeof(args->start.srcdev_name)) == NULL)
+			return -ENAMETOOLONG;
+	} else {
+		args->start.srcdev_name[0] = 0;
+	}
+	if (memchr(args->start.tgtdev_name, 0,
+		   sizeof(args->start.tgtdev_name)) == NULL)
+		return -ENAMETOOLONG;
+	return 0;
+}
+
 int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
 			       struct btrfs_ioctl_dev_replace_args *args)
 {
@@ -737,10 +754,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
 	default:
 		return -EINVAL;
 	}
 
-	if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
-	    args->start.tgtdev_name[0] == '\0')
-		return -EINVAL;
+	ret = btrfs_check_replace_dev_names(args);
+	if (ret < 0)
+		return ret;
 
 	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
 				      args->start.srcdevid,
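The new check can be exercised from userspace. A hedged sketch (standalone test program, not part of the commit; on a patched kernel the ioctl should fail with ENAMETOOLONG when a name buffer has no NUL terminator):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_dev_replace_args args;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <btrfs-mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&args, 0, sizeof(args));
	args.cmd = BTRFS_IOCTL_DEV_REPLACE_CMD_START;
	args.start.srcdevid = 0;
	/* Fill the name buffers completely, leaving no NUL terminator. */
	memset(args.start.srcdev_name, 'A', sizeof(args.start.srcdev_name));
	memset(args.start.tgtdev_name, 'A', sizeof(args.start.tgtdev_name));
	if (ioctl(fd, BTRFS_IOC_DEV_REPLACE, &args) < 0)
		perror("BTRFS_IOC_DEV_REPLACE"); /* expect ENAMETOOLONG */
	return 0;
}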
@@ -6705,11 +6705,20 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
 				if (ret)
 					goto out;
 			}
-			if (sctx->cur_inode_last_extent <
-			    sctx->cur_inode_size) {
-				ret = send_hole(sctx, sctx->cur_inode_size);
-				if (ret)
+			if (sctx->cur_inode_last_extent < sctx->cur_inode_size) {
+				ret = range_is_hole_in_parent(sctx,
+						      sctx->cur_inode_last_extent,
+						      sctx->cur_inode_size);
+				if (ret < 0) {
 					goto out;
+				} else if (ret == 0) {
+					ret = send_hole(sctx, sctx->cur_inode_size);
+					if (ret < 0)
+						goto out;
+				} else {
+					/* Range is already a hole, skip. */
+					ret = 0;
+				}
 			}
 		}
 		if (need_truncate) {
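range_is_hole_in_parent() walks the parent snapshot's file extent items; conceptually it answers the same question lseek(SEEK_DATA) answers for a regular file. A hedged userspace analogy (illustrative only, not kernel code; on filesystems without hole reporting the whole file is treated as data):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* True if [start, end) contains no data, i.e. is entirely a hole. */
static bool range_is_hole(int fd, off_t start, off_t end)
{
	off_t data = lseek(fd, start, SEEK_DATA);

	/* A failure with ENXIO means there is no data at or after 'start'. */
	return data < 0 || data >= end;
}

int main(void)
{
	int fd = open("sparse.bin", O_CREAT | O_RDWR | O_TRUNC, 0644);

	if (fd < 0 || write(fd, "data", 4) != 4 || ftruncate(fd, 1 << 20)) {
		perror("setup");
		return 1;
	}
	/* 4 bytes of data, then a trailing hole up to 1 MiB. */
	printf("trailing range is a hole: %d\n",
	       (int)range_is_hole(fd, 4096, 1 << 20));
	return 0;
}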
@@ -856,7 +856,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
 				    struct btrfs_space_info *space_info)
 {
-	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
 	u64 ordered, delalloc;
 	u64 thresh;
 	u64 used;
@@ -956,8 +956,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
 		ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
 		delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
 		if (ordered >= delalloc)
-			used += fs_info->delayed_refs_rsv.reserved +
-				fs_info->delayed_block_rsv.reserved;
+			used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
+				btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
 		else
 			used += space_info->bytes_may_use - global_rsv_size;
@@ -1173,7 +1173,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
 		enum btrfs_flush_state flush;
 		u64 delalloc_size = 0;
 		u64 to_reclaim, block_rsv_size;
-		u64 global_rsv_size = global_rsv->reserved;
+		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
 
 		loops++;
@@ -1185,9 +1185,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
 		 * assume it's tied up in delalloc reservations.
 		 */
 		block_rsv_size = global_rsv_size +
-				 delayed_block_rsv->reserved +
-				 delayed_refs_rsv->reserved +
-				 trans_rsv->reserved;
+				 btrfs_block_rsv_reserved(delayed_block_rsv) +
+				 btrfs_block_rsv_reserved(delayed_refs_rsv) +
+				 btrfs_block_rsv_reserved(trans_rsv);
 		if (block_rsv_size < space_info->bytes_may_use)
 			delalloc_size = space_info->bytes_may_use - block_rsv_size;
@@ -1207,16 +1207,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
 			to_reclaim = delalloc_size;
 			flush = FLUSH_DELALLOC;
 		} else if (space_info->bytes_pinned >
-			   (delayed_block_rsv->reserved +
-			    delayed_refs_rsv->reserved)) {
+			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
+			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
 			to_reclaim = space_info->bytes_pinned;
 			flush = COMMIT_TRANS;
-		} else if (delayed_block_rsv->reserved >
-			   delayed_refs_rsv->reserved) {
-			to_reclaim = delayed_block_rsv->reserved;
+		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
+			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
+			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
 			flush = FLUSH_DELAYED_ITEMS_NR;
 		} else {
-			to_reclaim = delayed_refs_rsv->reserved;
+			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
 			flush = FLUSH_DELAYED_REFS_NR;
 		}
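The visible tail of that chain can be read as a pure function over sampled counters. A distilled sketch (illustrative names, not kernel code) that samples each reserve once, so the comparison and the assignment use the same snapshot:

#include <stdint.h>

enum reclaim_target { TARGET_COMMIT_TRANS, TARGET_DELAYED_ITEMS, TARGET_DELAYED_REFS };

static enum reclaim_target pick_target(uint64_t pinned, uint64_t delayed_block,
				       uint64_t delayed_refs, uint64_t *to_reclaim)
{
	if (pinned > delayed_block + delayed_refs) {
		/* Pinned bytes dominate: only a transaction commit frees them. */
		*to_reclaim = pinned;
		return TARGET_COMMIT_TRANS;
	}
	if (delayed_block > delayed_refs) {
		*to_reclaim = delayed_block;
		return TARGET_DELAYED_ITEMS;
	}
	*to_reclaim = delayed_refs;
	return TARGET_DELAYED_REFS;
}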
@@ -1639,6 +1639,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	}
 
 out:
+	/* Reject non SINGLE data profiles without RST */
+	if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
+	    (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+	    !fs_info->stripe_root) {
+		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+			  btrfs_bg_type_to_raid_name(map->type));
+		return -EINVAL;
+	}
+
 	if (cache->alloc_offset > cache->zone_capacity) {
 		btrfs_err(fs_info,
 "zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
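The rejected case is any DATA chunk whose type has a profile bit set (anything other than SINGLE) while fs_info->stripe_root is NULL. A hedged truth-table sketch with stand-in flag values (the real BTRFS_BLOCK_GROUP_* constants differ):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_GROUP_DATA  (1ULL << 0)   /* stand-ins for the kernel flags */
#define BLOCK_GROUP_RAID1 (1ULL << 4)
#define PROFILE_MASK      (0xffULL << 3) /* illustrative mask of profile bits */

static bool needs_rst(uint64_t type, bool have_stripe_root)
{
	return (type & BLOCK_GROUP_DATA) && (type & PROFILE_MASK) &&
	       !have_stripe_root;
}

int main(void)
{
	/* SINGLE has no profile bit, so it is always allowed. */
	printf("data SINGLE: %d\n", needs_rst(BLOCK_GROUP_DATA, false));
	printf("data RAID1, no RST: %d\n",
	       needs_rst(BLOCK_GROUP_DATA | BLOCK_GROUP_RAID1, false));
	printf("data RAID1, RST: %d\n",
	       needs_rst(BLOCK_GROUP_DATA | BLOCK_GROUP_RAID1, true));
	return 0;
}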