Commit 28405268 authored by Linus Torvalds

Merge tag 'for-6.11-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - fix use-after-free when submitting bios for read: after an error on a
   partially submitted bio, the original bio is freed while it can still
   be accessed again

 - fix fstests case btrfs/301: with quotas enabled, wait for delayed
   iputs when flushing delalloc

 - fix periodic block group reclaim: an uninitialized value can be
   returned if there are no block groups to reclaim

 - fix build warning (-Wmaybe-uninitialized)

* tag 'for-6.11-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix uninitialized return value from btrfs_reclaim_sweep()
  btrfs: fix a use-after-free when hitting errors inside btrfs_submit_chunk()
  btrfs: initialize last_extent_end to fix -Wmaybe-uninitialized warning in extent_fiemap()
  btrfs: run delayed iputs when flushing delalloc
parents 86987d84 ecb54277
@@ -668,7 +668,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 {
         struct btrfs_inode *inode = bbio->inode;
         struct btrfs_fs_info *fs_info = bbio->fs_info;
-        struct btrfs_bio *orig_bbio = bbio;
         struct bio *bio = &bbio->bio;
         u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
         u64 length = bio->bi_iter.bi_size;
@@ -706,7 +705,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
                 bbio->saved_iter = bio->bi_iter;
                 ret = btrfs_lookup_bio_sums(bbio);
                 if (ret)
-                        goto fail_put_bio;
+                        goto fail;
         }
 
         if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
@@ -740,13 +739,13 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 
                         ret = btrfs_bio_csum(bbio);
                         if (ret)
-                                goto fail_put_bio;
+                                goto fail;
                 } else if (use_append ||
                            (btrfs_is_zoned(fs_info) && inode &&
                             inode->flags & BTRFS_INODE_NODATASUM)) {
                         ret = btrfs_alloc_dummy_sum(bbio);
                         if (ret)
-                                goto fail_put_bio;
+                                goto fail;
                 }
         }
 
@@ -754,12 +753,23 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 done:
         return map_length == length;
 
-fail_put_bio:
-        if (map_length < length)
-                btrfs_cleanup_bio(bbio);
 fail:
         btrfs_bio_counter_dec(fs_info);
-        btrfs_bio_end_io(orig_bbio, ret);
+        /*
+         * We have split the original bbio, now we have to end both the current
+         * @bbio and remaining one, as the remaining one will never be submitted.
+         */
+        if (map_length < length) {
+                struct btrfs_bio *remaining = bbio->private;
+
+                ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
+                ASSERT(remaining);
+
+                remaining->bio.bi_status = ret;
+                btrfs_orig_bbio_end_io(remaining);
+        }
+        bbio->bio.bi_status = ret;
+        btrfs_orig_bbio_end_io(bbio);
         /* Do not submit another chunk */
         return true;
 }
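The fix above replaces the old fail_put_bio path with a failure path that completes both halves of a split bio. The sketch below is a minimal user-space model of that pattern only, not btrfs code; struct fake_bbio, end_bbio() and fail_submission() are made-up stand-ins for btrfs_bio, btrfs_orig_bbio_end_io() and the fail: label.

/*
 * Minimal user-space model of the error path above: when a bio has been
 * split, the remaining half will never be submitted, so the failure
 * path must complete both halves exactly once instead of touching an
 * already-freed original.  All names here are hypothetical stand-ins.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bbio {
        int status;                   /* completion status, 0 means success */
        struct fake_bbio *remaining;  /* second half after a split, or NULL */
};

/* Complete one half: record the status and release it exactly once. */
static void end_bbio(struct fake_bbio *bbio, int status)
{
        bbio->status = status;
        printf("ended bbio %p with status %d\n", (void *)bbio, status);
        free(bbio);
}

/* Failure path: end the never-submitted remainder, then the current half. */
static void fail_submission(struct fake_bbio *bbio, int err)
{
        if (bbio->remaining)
                end_bbio(bbio->remaining, err);
        end_bbio(bbio, err);
}

int main(void)
{
        struct fake_bbio *first = calloc(1, sizeof(*first));
        struct fake_bbio *second = calloc(1, sizeof(*second));

        assert(first && second);
        first->remaining = second;    /* pretend the original bio was split */
        fail_submission(first, -EIO); /* both halves completed, no use-after-free */
        return 0;
}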
@@ -637,7 +637,7 @@ static int extent_fiemap(struct btrfs_inode *inode,
         struct btrfs_path *path;
         struct fiemap_cache cache = { 0 };
         struct btrfs_backref_share_check_ctx *backref_ctx;
-        u64 last_extent_end;
+        u64 last_extent_end = 0;
         u64 prev_extent_end;
         u64 range_start;
         u64 range_end;
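For context on the -Wmaybe-uninitialized change above, here is a standalone illustration of the warning pattern, not the btrfs code: fill_end() is a made-up helper that may return without writing its output parameter, which is enough for the compiler to warn about a later read unless the variable starts out initialized.

/*
 * Standalone illustration of the -Wmaybe-uninitialized pattern; the
 * helper and variable names are made up for the example.
 */
#include <stdio.h>

/* May fail without writing *end, e.g. when there is no data. */
static int fill_end(int have_data, unsigned long long *end)
{
        if (!have_data)
                return -1;
        *end = 42;
        return 0;
}

int main(void)
{
        unsigned long long last_end = 0; /* = 0 silences the warning */
        int ret = fill_end(1, &last_end);

        /*
         * Without the initializer, the compiler cannot always prove that
         * last_end was written before this read, even when the surrounding
         * logic guarantees it, and may emit -Wmaybe-uninitialized.
         */
        if (ret == 0)
                printf("last extent end: %llu\n", last_end);
        return 0;
}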
@@ -4185,6 +4185,8 @@ static int try_flush_qgroup(struct btrfs_root *root)
                 return 0;
         }
 
+        btrfs_run_delayed_iputs(root->fs_info);
+        btrfs_wait_on_delayed_iputs(root->fs_info);
         ret = btrfs_start_delalloc_snapshot(root, true);
         if (ret < 0)
                 goto out;
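The ordering introduced above (run delayed iputs and wait for them before flushing delalloc) follows a general drain-then-flush pattern. The sketch below only models that ordering with made-up names (run_deferred_releases(), wait_deferred_releases(), flush_delalloc()); it is not the btrfs implementation.

/*
 * Toy model of the drain-then-flush ordering: deferred releases are run
 * and waited for before the flush, so the flush observes their effects.
 * All names and the bookkeeping here are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

static int deferred_releases = 3;   /* releases queued by earlier work */
static long published_bytes;        /* space made visible by releases  */

/* Process every queued deferred release. */
static void run_deferred_releases(void)
{
        while (deferred_releases > 0) {
                deferred_releases--;
                published_bytes += 4096;
        }
}

/* In the kernel this would block until async workers finish. */
static void wait_deferred_releases(void)
{
        assert(deferred_releases == 0);
}

/* The flush only accounts for what the releases already published. */
static int flush_delalloc(void)
{
        printf("flushing with %ld bytes already published\n", published_bytes);
        return 0;
}

int main(void)
{
        run_deferred_releases();
        wait_deferred_releases();
        return flush_delalloc();
}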
@@ -1985,7 +1985,7 @@ static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
         return unalloc < data_chunk_size;
 }
 
-static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
+static void do_reclaim_sweep(struct btrfs_fs_info *fs_info,
                             struct btrfs_space_info *space_info, int raid)
 {
         struct btrfs_block_group *bg;
@@ -2031,7 +2031,6 @@ static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
         }
 
         up_read(&space_info->groups_sem);
-        return 0;
 }
 
 void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
@@ -2074,21 +2073,15 @@ bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
         return ret;
 }
 
-int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
+void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
 {
-        int ret;
         int raid;
         struct btrfs_space_info *space_info;
 
         list_for_each_entry(space_info, &fs_info->space_info, list) {
                 if (!btrfs_should_periodic_reclaim(space_info))
                         continue;
-                for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
-                        ret = do_reclaim_sweep(fs_info, space_info, raid);
-                        if (ret)
-                                return ret;
-                }
+                for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
+                        do_reclaim_sweep(fs_info, space_info, raid);
         }
-
-        return ret;
 }
@@ -294,6 +294,6 @@ void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes);
 void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
 bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
 int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info);
-int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
+void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
 
 #endif /* BTRFS_SPACE_INFO_H */
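The space-info changes above fix a classic uninitialized-return bug: a local ret assigned only inside a loop body was returned even when the loop never ran, so the patch drops the return value entirely. The fragment below reproduces the bug shape and the void-returning fix with made-up names (sweep_all_buggy(), sweep_one()); it is not the btrfs code.

/*
 * Standalone reproduction of the uninitialized-return bug class fixed
 * above; all names are made up for the example.
 */
#include <stdio.h>

static int sweep_one(int idx)
{
        (void)idx;
        return 0;
}

/*
 * Buggy shape: when n == 0 the loop body never assigns ret, so the
 * final return would read an indeterminate value.
 */
static int sweep_all_buggy(int n)
{
        int ret;
        int i;

        for (i = 0; i < n; i++) {
                ret = sweep_one(i);
                if (ret)
                        return ret;
        }
        return ret;   /* uninitialized when n == 0 */
}

/* Fixed shape mirroring the patch: no return value to get wrong. */
static void sweep_all(int n)
{
        int i;

        for (i = 0; i < n; i++)
                sweep_one(i);
}

int main(void)
{
        sweep_all(0);   /* nothing to reclaim: fine either way */
        /* Calling sweep_all_buggy(0) would return garbage; n > 0 is safe. */
        printf("buggy variant returned %d\n", sweep_all_buggy(2));
        return 0;
}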