Commit 188943a1 authored by Linus Torvalds

Merge tag 'fs-for_v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf, reiserfs, and quota updates from Jan Kara:

 - Fix for udf to make splicing work again

 - More disk format sanity checks for ext2 to avoid crashes found by
   syzbot

 - More quota disk format checks to avoid crashes found by fuzzing

 - Reiserfs & isofs cleanups

* tag 'fs-for_v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  quota: Add more checking after reading from quota file
  quota: Replace all block number checking with helper function
  quota: Check next/prev free block number after reading from quota file
  ext2: Use kvmalloc() for group descriptor array
  ext2: Add sanity checks for group and filesystem size
  udf: Support splicing to file
  isofs: delete unnecessary checks before brelse()
  fs/reiserfs: replace ternary operator with min() and min_t()
parents abf625dc 191249f7
@@ -163,7 +163,7 @@ static void ext2_put_super (struct super_block * sb)
 	db_count = sbi->s_gdb_count;
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
+	kvfree(sbi->s_group_desc);
 	kfree(sbi->s_debts);
 	percpu_counter_destroy(&sbi->s_freeblocks_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
@@ -1052,6 +1052,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 			sbi->s_blocks_per_group);
 		goto failed_mount;
 	}
+	/* At least inode table, bitmaps, and sb have to fit in one group */
+	if (sbi->s_blocks_per_group <= sbi->s_itb_per_group + 3) {
+		ext2_msg(sb, KERN_ERR,
+			"error: #blocks per group smaller than metadata size: %lu <= %lu",
+			sbi->s_blocks_per_group, sbi->s_inodes_per_group + 3);
+		goto failed_mount;
+	}
 	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
 		ext2_msg(sb, KERN_ERR,
 			"error: #fragments per group too big: %lu",
@@ -1065,9 +1072,14 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 			sbi->s_inodes_per_group);
 		goto failed_mount;
 	}
+	if (sb_bdev_nr_blocks(sb) < le32_to_cpu(es->s_blocks_count)) {
+		ext2_msg(sb, KERN_ERR,
+			"bad geometry: block count %u exceeds size of device (%u blocks)",
+			le32_to_cpu(es->s_blocks_count),
+			(unsigned)sb_bdev_nr_blocks(sb));
+		goto failed_mount;
+	}
 
-	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
-		goto cantfind_ext2;
 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 				le32_to_cpu(es->s_first_data_block) - 1)
 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
@@ -1080,7 +1092,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
 		   EXT2_DESC_PER_BLOCK(sb);
-	sbi->s_group_desc = kmalloc_array(db_count,
+	sbi->s_group_desc = kvmalloc_array(db_count,
 					  sizeof(struct buffer_head *),
 					  GFP_KERNEL);
 	if (sbi->s_group_desc == NULL) {
@@ -1206,7 +1218,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
 failed_mount_group_desc:
-	kfree(sbi->s_group_desc);
+	kvfree(sbi->s_group_desc);
 	kfree(sbi->s_debts);
 failed_mount:
 	brelse(bh);
......
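The ext2 hunks above switch the group descriptor array from kmalloc_array() to kvmalloc_array(), so the matching frees in ext2_put_super() and in the mount failure path become kvfree(). A minimal sketch of the pairing this relies on, not part of the commit (the helper names are made up for illustration):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

/* kvmalloc_array() tries kmalloc() first and falls back to vmalloc() for
 * large allocations, so the result must be released with kvfree(), which
 * handles either backing store. */
static struct buffer_head **alloc_desc_array(unsigned long db_count)
{
	return kvmalloc_array(db_count, sizeof(struct buffer_head *),
			      GFP_KERNEL);
}

static void free_desc_array(struct buffer_head **group_desc)
{
	kvfree(group_desc);
}

This is why both kfree() call sites touching s_group_desc change in the same commit: freeing vmalloc-backed memory with plain kfree() would be a bug.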
@@ -1277,13 +1277,11 @@ static int isofs_read_level3_size(struct inode *inode)
 	} while (more_entries);
 out:
 	kfree(tmpde);
-	if (bh)
-		brelse(bh);
+	brelse(bh);
 	return 0;
 
 out_nomem:
-	if (bh)
-		brelse(bh);
+	brelse(bh);
 	return -ENOMEM;
 
 out_noread:
@@ -1486,8 +1484,7 @@ static int isofs_read_inode(struct inode *inode, int relocated)
 	ret = 0;
 out:
 	kfree(tmpde);
-	if (bh)
-		brelse(bh);
+	brelse(bh);
 	return ret;
 
 out_badread:
......
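The isofs hunks above can drop the "if (bh)" guards because brelse() already tolerates a NULL buffer_head: the inline wrapper checks the pointer before dropping the reference. A tiny sketch, purely illustrative and not part of the commit:

#include <linux/buffer_head.h>

static void release_metadata_bh(struct buffer_head *bh)
{
	/* brelse() is a no-op for NULL, so no caller-side check is needed. */
	brelse(bh);
}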
@@ -71,6 +71,40 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 	return ret;
 }
 
+static inline int do_check_range(struct super_block *sb, const char *val_name,
+				 uint val, uint min_val, uint max_val)
+{
+	if (val < min_val || val > max_val) {
+		quota_error(sb, "Getting %s %u out of range %u-%u",
+			    val_name, val, min_val, max_val);
+		return -EUCLEAN;
+	}
+
+	return 0;
+}
+
+static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
+				    struct qt_disk_dqdbheader *dh)
+{
+	int err = 0;
+
+	err = do_check_range(info->dqi_sb, "dqdh_next_free",
+			     le32_to_cpu(dh->dqdh_next_free), 0,
+			     info->dqi_blocks - 1);
+	if (err)
+		return err;
+	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
+			     le32_to_cpu(dh->dqdh_prev_free), 0,
+			     info->dqi_blocks - 1);
+	if (err)
+		return err;
+	err = do_check_range(info->dqi_sb, "dqdh_entries",
+			     le16_to_cpu(dh->dqdh_entries), 0,
+			     qtree_dqstr_in_blk(info));
+
+	return err;
+}
+
 /* Remove empty block from list and return it */
 static int get_free_dqblk(struct qtree_mem_dqinfo *info)
 {
@@ -85,6 +119,9 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
 		ret = read_blk(info, blk, buf);
 		if (ret < 0)
 			goto out_buf;
+		ret = check_dquot_block_header(info, dh);
+		if (ret)
+			goto out_buf;
 		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
 	}
 	else {
@@ -232,6 +269,9 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
 		*err = read_blk(info, blk, buf);
 		if (*err < 0)
 			goto out_buf;
+		*err = check_dquot_block_header(info, dh);
+		if (*err)
+			goto out_buf;
 	} else {
 		blk = get_free_dqblk(info);
 		if ((int)blk < 0) {
@@ -313,6 +353,10 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	}
 	ref = (__le32 *)buf;
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
+			     info->dqi_blocks - 1);
+	if (ret)
+		goto out_buf;
 	if (!newblk)
 		newson = 1;
 	if (depth == info->dqi_qtree_depth - 1) {
@@ -424,6 +468,9 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		goto out_buf;
 	}
 	dh = (struct qt_disk_dqdbheader *)buf;
+	ret = check_dquot_block_header(info, dh);
+	if (ret)
+		goto out_buf;
 	le16_add_cpu(&dh->dqdh_entries, -1);
 	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
 		ret = remove_free_dqentry(info, buf, blk);
@@ -480,12 +527,10 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
-	if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
-		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
-			    newblk, info->dqi_blocks);
-		ret = -EUCLEAN;
+	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
+			     info->dqi_blocks - 1);
+	if (ret)
 		goto out_buf;
-	}
 	if (depth == info->dqi_qtree_depth - 1) {
 		ret = free_dqentry(info, dquot, newblk);
@@ -586,12 +631,10 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
 	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
 	if (!blk)	/* No reference? */
 		goto out_buf;
-	if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
-		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
-			    blk, info->dqi_blocks);
-		ret = -EUCLEAN;
+	ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
+			     info->dqi_blocks - 1);
+	if (ret)
 		goto out_buf;
-	}
 	if (depth < info->dqi_qtree_depth - 1)
 		ret = find_tree_dqentry(info, dquot, blk, depth+1);
@@ -705,15 +748,21 @@ static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
 		goto out_buf;
 	}
 	for (i = __get_index(info, *id, depth); i < epb; i++) {
-		if (ref[i] == cpu_to_le32(0)) {
+		uint blk_no = le32_to_cpu(ref[i]);
+
+		if (blk_no == 0) {
 			*id += level_inc;
 			continue;
 		}
+		ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
+				     info->dqi_blocks - 1);
+		if (ret)
+			goto out_buf;
 		if (depth == info->dqi_qtree_depth - 1) {
 			ret = 0;
 			goto out_buf;
 		}
-		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
+		ret = find_next_id(info, id, blk_no, depth + 1);
 		if (ret != -ENOENT)
 			break;
 	}
......
@@ -456,7 +456,7 @@ static int print_internal(struct buffer_head *bh, int first, int last)
 		to = B_NR_ITEMS(bh);
 	} else {
 		from = first;
-		to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh);
+		to = min_t(int, last, B_NR_ITEMS(bh));
 	}
 
 	reiserfs_printk("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);
......
@@ -97,7 +97,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
 		 * using the copy_size var below allows this code to work for
 		 * both shrinking and expanding the FS.
 		 */
-		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
+		copy_size = min(bmap_nr_new, bmap_nr);
 		copy_size =
 		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
 		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
......
@@ -2504,9 +2504,7 @@ static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
 	len = i_size - off;
 	toread = len;
 	while (toread > 0) {
-		tocopy =
-		    sb->s_blocksize - offset <
-		    toread ? sb->s_blocksize - offset : toread;
+		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
 		tmp_bh.b_state = 0;
 		/*
 		 * Quota files are without tails so we can safely
@@ -2554,8 +2552,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
 		return -EIO;
 	}
 	while (towrite > 0) {
-		tocopy = sb->s_blocksize - offset < towrite ?
-			sb->s_blocksize - offset : towrite;
+		tocopy = min_t(unsigned long, sb->s_blocksize - offset, towrite);
 		tmp_bh.b_state = 0;
 		reiserfs_write_lock(sb);
 		err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
......
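The reiserfs hunks above replace open-coded ternaries with the kernel's min() and min_t() macros from <linux/minmax.h>: min() type-checks its operands and complains when they differ, while min_t(type, a, b) casts both to the named type first, which is why the quota read/write paths use min_t(unsigned long, ...). A small sketch, not part of the commit (the function is hypothetical):

#include <linux/minmax.h>
#include <linux/types.h>

static unsigned long bytes_to_copy(unsigned long room, size_t remaining)
{
	/* Mixed types: plain min(room, remaining) would trip min()'s type
	 * check, so cast both operands via min_t(). */
	return min_t(unsigned long, room, remaining);
}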
@@ -252,6 +252,7 @@ const struct file_operations udf_file_operations = {
 	.release = udf_release_file,
 	.fsync = generic_file_fsync,
 	.splice_read = generic_file_splice_read,
+	.splice_write = iter_file_splice_write,
 	.llseek = generic_file_llseek,
 };
......
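The single added line in the udf hunk wires .splice_write to the generic iter_file_splice_write() helper, which is what lets splice(2) write into UDF regular files again. A hedged userspace sketch of the call it enables, not part of the commit (the target path is hypothetical):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "written via splice\n";
	int pipefd[2];
	int out;
	ssize_t n;

	out = open("/mnt/udf/out.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (out < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}
	/* Stage data in a pipe, then splice it straight into the file; on
	 * the kernel side this request lands in the file's .splice_write. */
	if (write(pipefd[1], msg, sizeof(msg) - 1) < 0) {
		perror("write");
		return 1;
	}
	n = splice(pipefd[0], NULL, out, NULL, sizeof(msg) - 1, 0);
	if (n < 0)
		perror("splice");
	close(pipefd[0]);
	close(pipefd[1]);
	close(out);
	return n < 0 ? 1 : 0;
}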