Commit f5a44db5 authored by Theodore Ts'o's avatar Theodore Ts'o

ext4: add explicit casts when masking cluster sizes

The missing casts can cause the high 64-bits of the physical blocks to
be lost.  Set up new macros which allow us to make sure the right
thing happens, even if at some point we end up supporting larger
logical block numbers.

Thanks to Emese Revfy and the PaX security team for reporting this
issue.
Reported-by: PaX Team <pageexec@freemail.hu>
Reported-by: Emese Revfy <re.emese@gmail.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
parent 34cf865d
...@@ -268,6 +268,16 @@ struct ext4_io_submit { ...@@ -268,6 +268,16 @@ struct ext4_io_submit {
/* Translate # of blks to # of clusters */ /* Translate # of blks to # of clusters */
#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
(sbi)->s_cluster_bits) (sbi)->s_cluster_bits)
/*
 * Mask out the low bits to get the starting block of the cluster.
 * The cast is required so the mask is computed in the block-number
 * type: without it, masking a 64-bit ext4_fsblk_t with an int-typed
 * ~(ratio - 1) would sign-extend and could lose the high bits of the
 * physical block number.  The casts are fully parenthesized so the
 * grouping does not rely on cast-vs-minus precedence.
 */
#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
	~(((ext4_fsblk_t) (s)->s_cluster_ratio) - 1))
#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
	~(((ext4_lblk_t) (s)->s_cluster_ratio) - 1))
/* Get the cluster offset (block's position within its cluster; 0 if aligned) */
#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
	(((ext4_fsblk_t) (s)->s_cluster_ratio) - 1))
#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
	(((ext4_lblk_t) (s)->s_cluster_ratio) - 1))
/* /*
* Structure of a blocks group descriptor * Structure of a blocks group descriptor
......
...@@ -1851,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, ...@@ -1851,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
depth = ext_depth(inode); depth = ext_depth(inode);
if (!path[depth].p_ext) if (!path[depth].p_ext)
goto out; goto out;
b2 = le32_to_cpu(path[depth].p_ext->ee_block); b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
b2 &= ~(sbi->s_cluster_ratio - 1);
/* /*
* get the next allocated block if the extent in the path * get the next allocated block if the extent in the path
...@@ -1862,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, ...@@ -1862,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
b2 = ext4_ext_next_allocated_block(path); b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS) if (b2 == EXT_MAX_BLOCKS)
goto out; goto out;
b2 &= ~(sbi->s_cluster_ratio - 1); b2 = EXT4_LBLK_CMASK(sbi, b2);
} }
/* check for wrap through zero on extent logical start block*/ /* check for wrap through zero on extent logical start block*/
...@@ -2521,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, ...@@ -2521,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* extent, we have to mark the cluster as used (store negative * extent, we have to mark the cluster as used (store negative
* cluster number in partial_cluster). * cluster number in partial_cluster).
*/ */
unaligned = pblk & (sbi->s_cluster_ratio - 1); unaligned = EXT4_PBLK_COFF(sbi, pblk);
if (unaligned && (ee_len == num) && if (unaligned && (ee_len == num) &&
(*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
*partial_cluster = EXT4_B2C(sbi, pblk); *partial_cluster = EXT4_B2C(sbi, pblk);
...@@ -2615,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, ...@@ -2615,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* accidentally freeing it later on * accidentally freeing it later on
*/ */
pblk = ext4_ext_pblock(ex); pblk = ext4_ext_pblock(ex);
if (pblk & (sbi->s_cluster_ratio - 1)) if (EXT4_PBLK_COFF(sbi, pblk))
*partial_cluster = *partial_cluster =
-((long long)EXT4_B2C(sbi, pblk)); -((long long)EXT4_B2C(sbi, pblk));
ex--; ex--;
...@@ -3770,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) ...@@ -3770,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{ {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end; ext4_lblk_t lblk_start, lblk_end;
lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
lblk_end = lblk_start + sbi->s_cluster_ratio - 1; lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
return ext4_find_delalloc_range(inode, lblk_start, lblk_end); return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
...@@ -3829,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, ...@@ -3829,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */ /* Check towards left side */
c_offset = lblk_start & (sbi->s_cluster_ratio - 1); c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
if (c_offset) { if (c_offset) {
lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
lblk_to = lblk_from + c_offset - 1; lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
...@@ -3839,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, ...@@ -3839,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
} }
/* Now check towards right. */ /* Now check towards right. */
c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
if (allocated_clusters && c_offset) { if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks; lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
...@@ -4047,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, ...@@ -4047,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_ext_path *path) struct ext4_ext_path *path)
{ {
struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ext4_lblk_t ex_cluster_start, ex_cluster_end; ext4_lblk_t ex_cluster_start, ex_cluster_end;
ext4_lblk_t rr_cluster_start; ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
...@@ -4065,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, ...@@ -4065,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
(rr_cluster_start == ex_cluster_start)) { (rr_cluster_start == ex_cluster_start)) {
if (rr_cluster_start == ex_cluster_end) if (rr_cluster_start == ex_cluster_end)
ee_start += ee_len - 1; ee_start += ee_len - 1;
map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
c_offset;
map->m_len = min(map->m_len, map->m_len = min(map->m_len,
(unsigned) sbi->s_cluster_ratio - c_offset); (unsigned) sbi->s_cluster_ratio - c_offset);
/* /*
...@@ -4220,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ...@@ -4220,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/ */
map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
newex.ee_block = cpu_to_le32(map->m_lblk); newex.ee_block = cpu_to_le32(map->m_lblk);
cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/* /*
* If we are doing bigalloc, check to see if the extent returned * If we are doing bigalloc, check to see if the extent returned
...@@ -4288,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, ...@@ -4288,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* needed so that future calls to get_implied_cluster_alloc() * needed so that future calls to get_implied_cluster_alloc()
* work correctly. * work correctly.
*/ */
offset = map->m_lblk & (sbi->s_cluster_ratio - 1); offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated); ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset; ar.goal -= offset;
ar.logical -= offset; ar.logical -= offset;
......
...@@ -4126,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, ...@@ -4126,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ext4_get_group_no_and_offset(sb, goal, &group, &block); ext4_get_group_no_and_offset(sb, goal, &group, &block);
/* set up allocation goals */ /* set up allocation goals */
ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
ac->ac_status = AC_STATUS_CONTINUE; ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_sb = sb; ac->ac_sb = sb;
ac->ac_inode = ar->inode; ac->ac_inode = ar->inode;
...@@ -4668,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, ...@@ -4668,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
* blocks at the beginning or the end unless we are explicitly * blocks at the beginning or the end unless we are explicitly
* requested to avoid doing so. * requested to avoid doing so.
*/ */
overflow = block & (sbi->s_cluster_ratio - 1); overflow = EXT4_PBLK_COFF(sbi, block);
if (overflow) { if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
overflow = sbi->s_cluster_ratio - overflow; overflow = sbi->s_cluster_ratio - overflow;
...@@ -4682,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, ...@@ -4682,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
count += overflow; count += overflow;
} }
} }
overflow = count & (sbi->s_cluster_ratio - 1); overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) { if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
if (count > overflow) if (count > overflow)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment