Commit 1deae276 authored by Theodore Ts'o's avatar Theodore Ts'o Committed by Ben Hutchings

ext4: use atomic64_t for the per-flexbg free_clusters count

commit 90ba983f upstream.

A user who was using a 8TB+ file system and with a very large flexbg
size (> 65536) could cause the atomic_t used in the struct flex_groups
to overflow.  This was detected by PaX security patchset:

http://forums.grsecurity.net/viewtopic.php?f=3&t=3289&p=12551#p12551

This bug was introduced in commit 9f24e420, so it's been around
since 2.6.30.  :-(

Fix this by using an atomic64_t for struct orlov_stats's
free_clusters.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reviewed-by: Lukas Czerner <lczerner@redhat.com>
[bwh: Backported to 3.2: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent cee9973e
...@@ -309,8 +309,8 @@ struct ext4_group_desc ...@@ -309,8 +309,8 @@ struct ext4_group_desc
*/ */
struct flex_groups { struct flex_groups {
atomic64_t free_clusters;
atomic_t free_inodes; atomic_t free_inodes;
atomic_t free_clusters;
atomic_t used_dirs; atomic_t used_dirs;
}; };
......
...@@ -294,8 +294,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) ...@@ -294,8 +294,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
} }
struct orlov_stats { struct orlov_stats {
__u64 free_clusters;
__u32 free_inodes; __u32 free_inodes;
__u32 free_clusters;
__u32 used_dirs; __u32 used_dirs;
}; };
...@@ -312,7 +312,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, ...@@ -312,7 +312,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
if (flex_size > 1) { if (flex_size > 1) {
stats->free_inodes = atomic_read(&flex_group[g].free_inodes); stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
stats->free_clusters = atomic_read(&flex_group[g].free_clusters); stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
stats->used_dirs = atomic_read(&flex_group[g].used_dirs); stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
return; return;
} }
......
...@@ -2866,7 +2866,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, ...@@ -2866,7 +2866,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (sbi->s_log_groups_per_flex) { if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, ext4_group_t flex_group = ext4_flex_group(sbi,
ac->ac_b_ex.fe_group); ac->ac_b_ex.fe_group);
atomic_sub(ac->ac_b_ex.fe_len, atomic64_sub(ac->ac_b_ex.fe_len,
&sbi->s_flex_groups[flex_group].free_clusters); &sbi->s_flex_groups[flex_group].free_clusters);
} }
...@@ -4724,7 +4724,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, ...@@ -4724,7 +4724,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
if (sbi->s_log_groups_per_flex) { if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group); ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic_add(count_clusters, atomic64_add(count_clusters,
&sbi->s_flex_groups[flex_group].free_clusters); &sbi->s_flex_groups[flex_group].free_clusters);
} }
...@@ -4869,7 +4869,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ...@@ -4869,7 +4869,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
if (sbi->s_log_groups_per_flex) { if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group); ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic_add(EXT4_NUM_B2C(sbi, blocks_freed), atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
&sbi->s_flex_groups[flex_group].free_clusters); &sbi->s_flex_groups[flex_group].free_clusters);
} }
......
...@@ -946,7 +946,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) ...@@ -946,7 +946,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
sbi->s_log_groups_per_flex) { sbi->s_log_groups_per_flex) {
ext4_group_t flex_group; ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group); flex_group = ext4_flex_group(sbi, input->group);
atomic_add(EXT4_NUM_B2C(sbi, input->free_blocks_count), atomic64_add(EXT4_NUM_B2C(sbi, input->free_blocks_count),
&sbi->s_flex_groups[flex_group].free_clusters); &sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb), atomic_add(EXT4_INODES_PER_GROUP(sb),
&sbi->s_flex_groups[flex_group].free_inodes); &sbi->s_flex_groups[flex_group].free_inodes);
......
...@@ -2047,7 +2047,7 @@ static int ext4_fill_flex_info(struct super_block *sb) ...@@ -2047,7 +2047,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
flex_group = ext4_flex_group(sbi, i); flex_group = ext4_flex_group(sbi, i);
atomic_add(ext4_free_inodes_count(sb, gdp), atomic_add(ext4_free_inodes_count(sb, gdp),
&sbi->s_flex_groups[flex_group].free_inodes); &sbi->s_flex_groups[flex_group].free_inodes);
atomic_add(ext4_free_group_clusters(sb, gdp), atomic64_add(ext4_free_group_clusters(sb, gdp),
&sbi->s_flex_groups[flex_group].free_clusters); &sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(ext4_used_dirs_count(sb, gdp), atomic_add(ext4_used_dirs_count(sb, gdp),
&sbi->s_flex_groups[flex_group].used_dirs); &sbi->s_flex_groups[flex_group].used_dirs);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment