Commit 3fde3003 authored by Linus Torvalds

Merge tag 'ext4_for_linus-6.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "Regression and bug fixes for ext4"

* tag 'ext4_for_linus-6.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix rec_len verify error
  ext4: do not let fstrim block system suspend
  ext4: move setting of trimmed bit into ext4_try_to_trim_range()
  jbd2: Fix memory leak in journal_init_common()
  jbd2: Remove page size assumptions
  buffer: Make bh_offset() work for compound pages
parents f0b0d403 7fda67e8
fs/ext4/mballoc.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/nospec.h>
 #include <linux/backing-dev.h>
+#include <linux/freezer.h>
 #include <trace/events/ext4.h>
 
 /*
@@ -6906,6 +6907,21 @@ __acquires(bitlock)
 	return ret;
 }
 
+static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
+					   ext4_group_t grp)
+{
+	if (grp < ext4_get_groups_count(sb))
+		return EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+	return (ext4_blocks_count(EXT4_SB(sb)->s_es) -
+		ext4_group_first_block_no(sb, grp) - 1) >>
+	       EXT4_CLUSTER_BITS(sb);
+}
+
+static bool ext4_trim_interrupted(void)
+{
+	return fatal_signal_pending(current) || freezing(current);
+}
+
 static int ext4_try_to_trim_range(struct super_block *sb,
 		struct ext4_buddy *e4b, ext4_grpblk_t start,
 		ext4_grpblk_t max, ext4_grpblk_t minblocks)
@@ -6913,9 +6929,12 @@ __acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
 __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
 {
 	ext4_grpblk_t next, count, free_count;
+	bool set_trimmed = false;
 	void *bitmap;
 
 	bitmap = e4b->bd_bitmap;
+	if (start == 0 && max >= ext4_last_grp_cluster(sb, e4b->bd_group))
+		set_trimmed = true;
 	start = max(e4b->bd_info->bb_first_free, start);
 	count = 0;
 	free_count = 0;
@@ -6930,16 +6949,14 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
 
 			if (ret && ret != -EOPNOTSUPP)
-				break;
+				return count;
 			count += next - start;
 		}
 		free_count += next - start;
 		start = next + 1;
 
-		if (fatal_signal_pending(current)) {
-			count = -ERESTARTSYS;
-			break;
-		}
+		if (ext4_trim_interrupted())
+			return count;
 
 		if (need_resched()) {
 			ext4_unlock_group(sb, e4b->bd_group);
@@ -6951,6 +6968,9 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
 			break;
 	}
 
+	if (set_trimmed)
+		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
+
 	return count;
 }
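Note: taken together, the changes above make ext4_try_to_trim_range() poll ext4_trim_interrupted() so a long-running FITRIM bails out as soon as the freezer starts suspending tasks, and make it set the group's trimmed bit itself once it has scanned the whole group. A minimal sketch of the freezer-polling pattern (walk_one_group() is a hypothetical helper; freezing() and fatal_signal_pending() are the real kernel predicates):

	#include <linux/freezer.h>
	#include <linux/sched/signal.h>

	/* Sketch: a long-running kernel loop that must not block system suspend. */
	static int walk_groups(unsigned int ngroups)
	{
		unsigned int group;

		for (group = 0; group < ngroups; group++) {
			/* Bail out on SIGKILL or as soon as a suspend begins. */
			if (fatal_signal_pending(current) || freezing(current))
				return -ERESTARTSYS;
			walk_one_group(group);	/* hypothetical helper */
		}
		return 0;
	}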
@@ -6961,7 +6981,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
  * @start:	first group block to examine
  * @max:	last group block to examine
  * @minblocks:	minimum extent block count
- * @set_trimmed:	set the trimmed flag if at least one block is trimmed
  *
  * ext4_trim_all_free walks through group's block bitmap searching for free
  * extents. When the free extent is found, mark it as used in group buddy
@@ -6971,7 +6990,7 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
 static ext4_grpblk_t
 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 		   ext4_grpblk_t start, ext4_grpblk_t max,
-		   ext4_grpblk_t minblocks, bool set_trimmed)
+		   ext4_grpblk_t minblocks)
 {
 	struct ext4_buddy e4b;
 	int ret;
@@ -6988,13 +7007,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 	ext4_lock_group(sb, group);
 
 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
-	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
+	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
-		if (ret >= 0 && set_trimmed)
-			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
-	} else {
+	else
 		ret = 0;
-	}
 
 	ext4_unlock_group(sb, group);
 	ext4_mb_unload_buddy(&e4b);
@@ -7027,7 +7043,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	ext4_fsblk_t first_data_blk =
 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
-	bool whole_group, eof = false;
 	int ret = 0;
 
 	start = range->start >> sb->s_blocksize_bits;
@@ -7046,10 +7061,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
 			goto out;
 	}
-	if (end >= max_blks - 1) {
+	if (end >= max_blks - 1)
 		end = max_blks - 1;
-		eof = true;
-	}
 	if (end <= first_data_blk)
 		goto out;
 	if (start < first_data_blk)
@@ -7063,9 +7076,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	/* end now represents the last cluster to discard in this group */
 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
-	whole_group = true;
 
 	for (group = first_group; group <= last_group; group++) {
+		if (ext4_trim_interrupted())
+			break;
 		grp = ext4_get_group_info(sb, group);
 		if (!grp)
 			continue;
@@ -7082,13 +7096,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 		 * change it for the last group, note that last_cluster is
 		 * already computed earlier by ext4_get_group_no_and_offset()
 		 */
-		if (group == last_group) {
+		if (group == last_group)
 			end = last_cluster;
-			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
-		}
 		if (grp->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, group, first_cluster,
-						 end, minlen, whole_group);
+						 end, minlen);
 			if (cnt < 0) {
 				ret = cnt;
 				break;
fs/ext4/namei.c
@@ -343,17 +343,17 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
 					   struct buffer_head *bh)
 {
 	struct ext4_dir_entry_tail *t;
+	int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
 
 #ifdef PARANOID
 	struct ext4_dir_entry *d, *top;
 
 	d = (struct ext4_dir_entry *)bh->b_data;
 	top = (struct ext4_dir_entry *)(bh->b_data +
-		   (EXT4_BLOCK_SIZE(inode->i_sb) -
-		    sizeof(struct ext4_dir_entry_tail)));
-	while (d < top && d->rec_len)
+		   (blocksize - sizeof(struct ext4_dir_entry_tail)));
+	while (d < top && ext4_rec_len_from_disk(d->rec_len, blocksize))
 		d = (struct ext4_dir_entry *)(((void *)d) +
-			le16_to_cpu(d->rec_len));
+			ext4_rec_len_from_disk(d->rec_len, blocksize));
 
 	if (d != top)
 		return NULL;
@@ -364,7 +364,8 @@ static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
 #endif
 
 	if (t->det_reserved_zero1 ||
-	    le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
+	    (ext4_rec_len_from_disk(t->det_rec_len, blocksize) !=
+	     sizeof(struct ext4_dir_entry_tail)) ||
 	    t->det_reserved_zero2 ||
 	    t->det_reserved_ft != EXT4_FT_DIR_CSUM)
 		return NULL;
@@ -445,13 +446,14 @@ static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
 	struct ext4_dir_entry *dp;
 	struct dx_root_info *root;
 	int count_offset;
+	int blocksize = EXT4_BLOCK_SIZE(inode->i_sb);
+	unsigned int rlen = ext4_rec_len_from_disk(dirent->rec_len, blocksize);
 
-	if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
+	if (rlen == blocksize)
 		count_offset = 8;
-	else if (le16_to_cpu(dirent->rec_len) == 12) {
+	else if (rlen == 12) {
 		dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
-		if (le16_to_cpu(dp->rec_len) !=
-		    EXT4_BLOCK_SIZE(inode->i_sb) - 12)
+		if (ext4_rec_len_from_disk(dp->rec_len, blocksize) != blocksize - 12)
 			return NULL;
 		root = (struct dx_root_info *)(((void *)dp + 12));
 		if (root->reserved_zero ||
@@ -1315,6 +1317,7 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
 	unsigned int buflen = bh->b_size;
 	char *base = bh->b_data;
 	struct dx_hash_info h = *hinfo;
+	int blocksize = EXT4_BLOCK_SIZE(dir->i_sb);
 
 	if (ext4_has_metadata_csum(dir->i_sb))
 		buflen -= sizeof(struct ext4_dir_entry_tail);
@@ -1335,11 +1338,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
 			map_tail--;
 			map_tail->hash = h.hash;
 			map_tail->offs = ((char *) de - base)>>2;
-			map_tail->size = le16_to_cpu(de->rec_len);
+			map_tail->size = ext4_rec_len_from_disk(de->rec_len,
+								blocksize);
 			count++;
 			cond_resched();
 		}
-		de = ext4_next_entry(de, dir->i_sb->s_blocksize);
+		de = ext4_next_entry(de, blocksize);
 	}
 	return count;
 }
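Note on the rec_len conversions above: rec_len is a 16-bit on-disk field, and with 64KiB blocks a single directory entry can cover the whole block, which no longer fits in 16 bits. ext4 therefore stores such lengths encoded, and ext4_rec_len_from_disk() is the only safe way to read them; a raw le16_to_cpu() misreads exactly those entries, which is what the "rec_len verify error" was about. A sketch of the decode, paraphrased from the helper in fs/ext4/ext4.h (constants quoted from memory and worth double-checking there):

	/* Decode an on-disk rec_len; only non-trivial when blocks can reach 64KiB. */
	static inline unsigned int rec_len_from_disk(__le16 dlen, unsigned blocksize)
	{
		unsigned int len = le16_to_cpu(dlen);

	#if (PAGE_SIZE >= 65536)
		if (len == EXT4_MAX_REC_LEN || len == 0)
			return blocksize;	/* entry fills the whole block */
		/* the low two stored bits carry bits 16..17 of the real length */
		return (len & 65532) | ((len & 3) << 16);
	#else
		return len;
	#endif
	}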
fs/jbd2/commit.c
@@ -298,14 +298,12 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
 static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 {
-	struct page *page = bh->b_page;
 	char *addr;
 	__u32 checksum;
 
-	addr = kmap_atomic(page);
-	checksum = crc32_be(crc32_sum,
-		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
-	kunmap_atomic(addr);
+	addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
+	checksum = crc32_be(crc32_sum, addr, bh->b_size);
+	kunmap_local(addr);
 
 	return checksum;
 }
@@ -322,7 +320,6 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 				    struct buffer_head *bh, __u32 sequence)
 {
 	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
-	struct page *page = bh->b_page;
 	__u8 *addr;
 	__u32 csum32;
 	__be32 seq;
@@ -331,11 +328,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
 		return;
 
 	seq = cpu_to_be32(sequence);
-	addr = kmap_atomic(page);
+	addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
 	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
-	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
-			     bh->b_size);
-	kunmap_atomic(addr);
+	csum32 = jbd2_chksum(j, csum32, addr, bh->b_size);
+	kunmap_local(addr);
 
 	if (jbd2_has_feature_csum3(j))
 		tag3->t_checksum = cpu_to_be32(csum32);
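Note on the jbd2 conversions above: kmap_atomic(bh->b_page) plus offset_in_page(bh->b_data) assumes the buffer's data lives within the first PAGE_SIZE bytes of its page. With compound pages (large folios) a buffer can sit past that boundary, so the mapping is now taken at the buffer's byte offset within the folio instead. The pattern, reduced to its skeleton:

	/* Map exactly the buffer's bytes, wherever they fall in the folio. */
	char *addr = kmap_local_folio(bh->b_folio, bh_offset(bh));
	/* ... read or checksum bh->b_size bytes at addr ... */
	kunmap_local(addr);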
fs/jbd2/journal.c
@@ -1601,6 +1601,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
 err_cleanup:
 	percpu_counter_destroy(&journal->j_checkpoint_jh_count);
+	if (journal->j_chksum_driver)
+		crypto_free_shash(journal->j_chksum_driver);
 	kfree(journal->j_wbuf);
 	jbd2_journal_destroy_revoke(journal);
 	journal_fail_superblock(journal);
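Note: the leak fixed here is that journal_init_common() takes a reference on a crc32c shash earlier in the function, but the err_cleanup path never released it. A condensed sketch of the pairing, assuming the usual jbd2 allocation call (the exact placement inside journal_init_common() is from memory):

	journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(journal->j_chksum_driver)) {
		journal->j_chksum_driver = NULL;
		goto err_cleanup;
	}
	...
	err_cleanup:
		if (journal->j_chksum_driver)
			crypto_free_shash(journal->j_chksum_driver);	/* the added free */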
fs/jbd2/transaction.c
@@ -935,19 +935,15 @@ static void warn_dirty_buffer(struct buffer_head *bh)
 /* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
 static void jbd2_freeze_jh_data(struct journal_head *jh)
 {
-	struct page *page;
-	int offset;
 	char *source;
 	struct buffer_head *bh = jh2bh(jh);
 
 	J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
-	page = bh->b_page;
-	offset = offset_in_page(bh->b_data);
-	source = kmap_atomic(page);
+	source = kmap_local_folio(bh->b_folio, bh_offset(bh));
 	/* Fire data frozen trigger just before we copy the data */
-	jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
-	memcpy(jh->b_frozen_data, source + offset, bh->b_size);
-	kunmap_atomic(source);
+	jbd2_buffer_frozen_trigger(jh, source, jh->b_triggers);
+	memcpy(jh->b_frozen_data, source, bh->b_size);
+	kunmap_local(source);
 
 	/*
 	 * Now that the frozen data is saved off, we need to store any matching
include/linux/buffer_head.h
@@ -171,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
 	return test_bit_acquire(BH_Uptodate, &bh->b_state);
 }
 
-#define bh_offset(bh)	((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+	return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
 
 /* If we *know* page->private refers to buffer_heads */
 #define page_buffers(page) \
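Note: the old macro masked with ~PAGE_MASK, which keeps only the low PAGE_SHIFT bits of b_data, so for a buffer past the first 4KiB of a compound page the offset silently wrapped; page_size() returns the full compound page size, so the new mask preserves it. A worked example with hypothetical numbers (4KiB base pages, one 16KiB compound page, b_data 10KiB into it):

	unsigned long off     = 0x2800;			/* 10KiB into the compound page */
	unsigned long old_off = off & (4096UL - 1);	/* & ~PAGE_MASK -> 0x800, wrong */
	unsigned long new_off = off & (16384UL - 1);	/* & (page_size(page) - 1) -> 0x2800 */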