Commit 7c990728 authored by Suraj Jitindar Singh, committed by Theodore Ts'o

ext4: fix potential race between s_flex_groups online resizing and access

During an online resize, the array of s_flex_groups structures gets replaced
so it can be enlarged. If there is a concurrent access to the array while its
old memory has been reused, this can lead to an invalid memory access.

The s_flex_groups array has been converted into an array of pointers rather
than an array of structures. This ensures that the information contained in
the structures cannot get out of sync during a resize due to an accessor
updating a value in the old structure after it has been copied but before the
array pointer is updated. Since the structures themselves are no longer
copied, but only the pointers to them, this case is mitigated.
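
As a rough sketch of the resulting access pattern (illustration only, not part
of this patch; it assumes kernel context with an ext4_sb_info *sbi and an
ext4_group_t flex_group in scope, and mirrors what the sbi_array_rcu_deref()
calls in the diff below expand to):

	/* Reader side: hold the RCU read lock only while loading the current
	 * array pointer and picking out the element pointer for this flex
	 * group.  The flex_groups element itself is never copied or freed by
	 * a resize, so it may still be used after rcu_read_unlock(). */
	struct flex_groups *fg;

	rcu_read_lock();
	fg = rcu_dereference(sbi->s_flex_groups)[flex_group];
	rcu_read_unlock();
	atomic_inc(&fg->free_inodes);

	/* Writer side (online resize): allocate a larger array of element
	 * pointers, copy the old pointers across, publish the new array with
	 * rcu_assign_pointer(), and free the old array only after an RCU
	 * grace period (ext4_kvfree_array_rcu() in this series). */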

Link: https://bugzilla.kernel.org/show_bug.cgi?id=206443
Link: https://lore.kernel.org/r/20200221053458.730016-4-tytso@mit.edu
Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@kernel.org
parent df3da4ea
@@ -1512,7 +1512,7 @@ struct ext4_sb_info {
 	unsigned int s_extent_max_zeroout_kb;
 
 	unsigned int s_log_groups_per_flex;
-	struct flex_groups *s_flex_groups;
+	struct flex_groups * __rcu *s_flex_groups;
 	ext4_group_t s_flex_groups_allocated;
 
 	/* workqueue for reserved extent conversions (buffered io) */
...
@@ -328,11 +328,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	percpu_counter_inc(&sbi->s_freeinodes_counter);
 	if (sbi->s_log_groups_per_flex) {
-		ext4_group_t f = ext4_flex_group(sbi, block_group);
+		struct flex_groups *fg;
 
-		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
+					 ext4_flex_group(sbi, block_group));
+		atomic_inc(&fg->free_inodes);
 		if (is_directory)
-			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+			atomic_dec(&fg->used_dirs);
 	}
 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
@@ -368,12 +370,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
 			    int flex_size, struct orlov_stats *stats)
 {
 	struct ext4_group_desc *desc;
-	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
 
 	if (flex_size > 1) {
-		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
-		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
-		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
+							     s_flex_groups, g);
+		stats->free_inodes = atomic_read(&fg->free_inodes);
+		stats->free_clusters = atomic64_read(&fg->free_clusters);
+		stats->used_dirs = atomic_read(&fg->used_dirs);
 		return;
 	}
@@ -1054,7 +1057,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 		if (sbi->s_log_groups_per_flex) {
 			ext4_group_t f = ext4_flex_group(sbi, group);
 
-			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
+							f)->used_dirs);
 		}
 	}
 	if (ext4_has_group_desc_csum(sb)) {
@@ -1077,7 +1081,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 	if (sbi->s_log_groups_per_flex) {
 		flex_group = ext4_flex_group(sbi, group);
-		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
+		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
+						flex_group)->free_inodes);
 	}
 
 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
...
@@ -3038,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		ext4_group_t flex_group = ext4_flex_group(sbi,
 							  ac->ac_b_ex.fe_group);
 		atomic64_sub(ac->ac_b_ex.fe_len,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
+						  flex_group)->free_clusters);
 	}
 
 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4936,7 +4937,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		atomic64_add(count_clusters,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
+						  flex_group)->free_clusters);
 	}
 
 	/*
@@ -5093,7 +5095,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		atomic64_add(clusters_freed,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
+						  flex_group)->free_clusters);
 	}
 
 	ext4_mb_unload_buddy(&e4b);
...
@@ -1430,11 +1430,14 @@ static void ext4_update_super(struct super_block *sb,
 		   percpu_counter_read(&sbi->s_freeclusters_counter));
 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group;
+		struct flex_groups *fg;
+
 		flex_group = ext4_flex_group(sbi, group_data[0].group);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &fg->free_clusters);
 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+			   &fg->free_inodes);
 	}
 
 	/*
...
@@ -1015,6 +1015,7 @@ static void ext4_put_super(struct super_block *sb)
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
 	struct buffer_head **group_desc;
+	struct flex_groups **flex_groups;
 	int aborted = 0;
 	int i, err;
@@ -1052,8 +1053,13 @@ static void ext4_put_super(struct super_block *sb)
 	for (i = 0; i < sbi->s_gdb_count; i++)
 		brelse(group_desc[i]);
 	kvfree(group_desc);
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
 	rcu_read_unlock();
-	kvfree(sbi->s_flex_groups);
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -2384,8 +2390,8 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct flex_groups *new_groups;
-	int size;
+	struct flex_groups **old_groups, **new_groups;
+	int size, i;
 
 	if (!sbi->s_log_groups_per_flex)
 		return 0;
@@ -2394,22 +2400,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 	if (size <= sbi->s_flex_groups_allocated)
 		return 0;
 
-	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
-	new_groups = kvzalloc(size, GFP_KERNEL);
+	new_groups = kvzalloc(roundup_pow_of_two(size *
+			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
 	if (!new_groups) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
-			 size / (int) sizeof(struct flex_groups));
+		ext4_msg(sb, KERN_ERR,
+			 "not enough memory for %d flex group pointers", size);
 		return -ENOMEM;
 	}
-
-	if (sbi->s_flex_groups) {
-		memcpy(new_groups, sbi->s_flex_groups,
-		       (sbi->s_flex_groups_allocated *
-			sizeof(struct flex_groups)));
-		kvfree(sbi->s_flex_groups);
+	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
+		new_groups[i] = kvzalloc(roundup_pow_of_two(
+					 sizeof(struct flex_groups)),
+					 GFP_KERNEL);
+		if (!new_groups[i]) {
+			for (i--; i >= sbi->s_flex_groups_allocated; i--)
+				kvfree(new_groups[i]);
+			kvfree(new_groups);
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %d flex groups", size);
+			return -ENOMEM;
+		}
 	}
-	sbi->s_flex_groups = new_groups;
-	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+	rcu_read_lock();
+	old_groups = rcu_dereference(sbi->s_flex_groups);
+	if (old_groups)
+		memcpy(new_groups, old_groups,
+		       (sbi->s_flex_groups_allocated *
+			sizeof(struct flex_groups *)));
+	rcu_read_unlock();
+	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
+	sbi->s_flex_groups_allocated = size;
+	if (old_groups)
+		ext4_kvfree_array_rcu(old_groups);
 	return 0;
 }
@@ -2417,6 +2438,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = NULL;
+	struct flex_groups *fg;
 	ext4_group_t flex_group;
 	int i, err;
@@ -2434,12 +2456,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
 		gdp = ext4_get_group_desc(sb, i, NULL);
 
 		flex_group = ext4_flex_group(sbi, i);
-		atomic_add(ext4_free_inodes_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
 		atomic64_add(ext4_free_group_clusters(sb, gdp),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
-		atomic_add(ext4_used_dirs_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].used_dirs);
+			     &fg->free_clusters);
+		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
 	}
 
 	return 1;
@@ -3641,6 +3662,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	struct buffer_head *bh, **group_desc;
 	struct ext4_super_block *es = NULL;
 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+	struct flex_groups **flex_groups;
 	ext4_fsblk_t block;
 	ext4_fsblk_t sb_block = get_sb_block(&data);
 	ext4_fsblk_t logical_sb_block;
@@ -4692,8 +4714,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
-	if (sbi->s_flex_groups)
-		kvfree(sbi->s_flex_groups);
+	rcu_read_lock();
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
...