Commit 1dbfae01 authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Convert ext4 to use the new mount API, and add support for the
  FS_IOC_GETFSLABEL and FS_IOC_SETFSLABEL ioctls.

  In addition, the usual large number of cleanups and bug fixes, in
   particular for the fast_commit feature"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (48 commits)
  ext4: don't use the orphan list when migrating an inode
  ext4: use BUG_ON instead of if condition followed by BUG
  ext4: fix a copy and paste typo
  ext4: set csum seed in tmp inode while migrating to extents
  ext4: remove unnecessary 'offset' assignment
  ext4: remove redundant o_start statement
  ext4: drop an always true check
  ext4: remove unused assignments
  ext4: remove redundant statement
  ext4: remove useless resetting io_end_size in mpage_process_page()
  ext4: allow to change s_last_trim_minblks via sysfs
  ext4: change s_last_trim_minblks type to unsigned long
  ext4: implement support for get/set fs label
  ext4: only set EXT4_MOUNT_QUOTA when journalled quota file is specified
  ext4: don't use kfree() on rcu protected pointer sbi->s_qf_names
  ext4: avoid trim error on fs with small groups
  ext4: fix an use-after-free issue about data=journal writeback mode
  ext4: fix null-ptr-deref in '__ext4_journal_ensure_credits'
  ext4: initialize err_blk before calling __ext4_get_inode_loc
  ext4: fix a possible ABBA deadlock due to busy PA
  ...
parents 11fc88c2 6eeaf88f
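
For context on the label ioctls named in the pull message: FS_IOC_GETFSLABEL and FS_IOC_SETFSLABEL are the generic ioctls from <linux/fs.h> (FSLABEL_MAX there is 256, while ext4 itself stores at most EXT4_LABEL_MAX = 16 bytes, as the ext4.h hunk below shows). A minimal userspace sketch, assuming the filesystem is mounted at /mnt:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FS_IOC_GETFSLABEL, FS_IOC_SETFSLABEL, FSLABEL_MAX */

int main(void)
{
	char label[FSLABEL_MAX] = "";
	int fd = open("/mnt", O_RDONLY);	/* any object on the filesystem works */

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
		printf("current label: %s\n", label);

	strncpy(label, "backup-vol", sizeof(label) - 1);
	if (ioctl(fd, FS_IOC_SETFSLABEL, label) != 0)
		perror("FS_IOC_SETFSLABEL");	/* setting the label typically requires CAP_SYS_ADMIN */

	close(fd);
	return 0;
}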
......@@ -246,7 +246,6 @@ ext4_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
ext4_fc_start_update(inode);
if ((type == ACL_TYPE_ACCESS) && acl) {
error = posix_acl_update_mode(mnt_userns, inode, &mode, &acl);
......@@ -264,7 +263,6 @@ ext4_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
}
out_stop:
ext4_journal_stop(handle);
ext4_fc_stop_update(inode);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
return error;
......
......@@ -303,7 +303,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
goto done;
brelse(bh);
bh = NULL;
offset = 0;
}
done:
err = 0;
......
......@@ -1298,6 +1298,8 @@ extern void ext4_set_bits(void *bm, int cur, int len);
/* Metadata checksum algorithm codes */
#define EXT4_CRC32C_CHKSUM 1
#define EXT4_LABEL_MAX 16
/*
* Structure of the super block
*/
......@@ -1347,7 +1349,7 @@ struct ext4_super_block {
/*60*/ __le32 s_feature_incompat; /* incompatible feature set */
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
/*78*/ char s_volume_name[16]; /* volume name */
/*78*/ char s_volume_name[EXT4_LABEL_MAX]; /* volume name */
/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */
/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
/*
......@@ -1661,7 +1663,7 @@ struct ext4_sb_info {
struct task_struct *s_mmp_tsk;
/* record the last minlen when FITRIM is called. */
atomic_t s_last_trim_minblks;
unsigned long s_last_trim_minblks;
/* Reference to checksum algorithm driver via cryptoapi */
struct crypto_shash *s_chksum_driver;
......@@ -1725,9 +1727,9 @@ struct ext4_sb_info {
*/
struct work_struct s_error_work;
/* Ext4 fast commit stuff */
/* Ext4 fast commit sub transaction ID */
atomic_t s_fc_subtid;
atomic_t s_fc_ineligible_updates;
/*
* After commit starts, the main queue gets locked, and the further
* updates get added in the staging queue.
......@@ -1747,7 +1749,6 @@ struct ext4_sb_info {
spinlock_t s_fc_lock;
struct buffer_head *s_fc_bh;
struct ext4_fc_stats s_fc_stats;
u64 s_fc_avg_commit_time;
#ifdef CONFIG_EXT4_DEBUG
int s_fc_debug_max_replay;
#endif
......@@ -2399,8 +2400,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
{
if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
BUG();
BUG_ON((len > blocksize) || (blocksize > (1 << 18)) || (len & 3));
#if (PAGE_SIZE >= 65536)
if (len < 65536)
return cpu_to_le16(len);
......@@ -2926,8 +2926,6 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
void ext4_fc_start_ineligible(struct super_block *sb, int reason);
void ext4_fc_stop_ineligible(struct super_block *sb);
void ext4_fc_start_update(struct inode *inode);
void ext4_fc_stop_update(struct inode *inode);
void ext4_fc_del(struct inode *inode);
......@@ -2935,6 +2933,7 @@ bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block);
void ext4_fc_replay_cleanup(struct super_block *sb);
int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
int __init ext4_fc_init_dentry_cache(void);
void ext4_fc_destroy_dentry_cache(void);
/* mballoc.c */
extern const struct seq_operations ext4_mb_seq_groups_ops;
......@@ -3096,6 +3095,9 @@ extern int ext4_group_extend(struct super_block *sb,
struct ext4_super_block *es,
ext4_fsblk_t n_blocks_count);
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
extern unsigned int ext4_list_backups(struct super_block *sb,
unsigned int *three, unsigned int *five,
unsigned int *seven);
/* super.c */
extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
......@@ -3110,6 +3112,8 @@ extern int ext4_read_bh_lock(struct buffer_head *bh, int op_flags, bool wait);
extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block);
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
extern __le32 ext4_superblock_csum(struct super_block *sb,
struct ext4_super_block *es);
extern void ext4_superblock_csum_set(struct super_block *sb);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
......
......@@ -162,6 +162,8 @@ int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
{
if (!ext4_handle_valid(handle))
return 0;
if (is_handle_aborted(handle))
return -EROFS;
if (jbd2_handle_buffer_credits(handle) >= check_cred &&
handle->h_revoke_credits >= revoke_cred)
return 0;
......
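The added is_handle_aborted() check matters because credits are extended in the middle of multi-step operations; a rough caller-side sketch (call site assumed, not from this patch):

/* extending a handle before dirtying more metadata; on an aborted
 * journal this now fails cleanly with -EROFS instead of touching
 * freed/NULL transaction state */
err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
				  ext4_free_metadata_revoke_credits(sb, 1));
if (err < 0)
	goto out_stop;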
......@@ -1496,8 +1496,7 @@ static int ext4_ext_search_left(struct inode *inode,
EXT4_ERROR_INODE(inode,
"ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
depth);
return -EFSCORRUPTED;
}
......@@ -2025,7 +2024,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
......@@ -2054,7 +2052,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+ ext4_ext_get_actual_len(newext));
if (unwritten)
ext4_ext_mark_unwritten(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
}
......@@ -4647,8 +4644,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret))
goto out_handle;
ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits,
(offset + len - 1) >> inode->i_sb->s_blocksize_bits);
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (ret >= 0)
......@@ -4697,8 +4692,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
FALLOC_FL_INSERT_RANGE))
return -EOPNOTSUPP;
ext4_fc_start_update(inode);
if (mode & FALLOC_FL_PUNCH_HOLE) {
ret = ext4_punch_hole(inode, offset, len);
goto exit;
......@@ -4762,7 +4755,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
inode_unlock(inode);
trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
exit:
ext4_fc_stop_update(inode);
return ret;
}
......@@ -5344,7 +5336,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode, 0);
......@@ -5383,7 +5375,6 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
ext4_fc_stop_ineligible(sb);
out_mmap:
filemap_invalidate_unlock(mapping);
out_mutex:
......@@ -5485,7 +5476,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
ext4_fc_start_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
......@@ -5560,7 +5551,6 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
out_stop:
ext4_journal_stop(handle);
ext4_fc_stop_ineligible(sb);
out_mmap:
filemap_invalidate_unlock(mapping);
out_mutex:
......
......@@ -71,21 +71,19 @@ struct ext4_fc_tail {
};
/*
* Fast commit reason codes
* Fast commit status codes
*/
enum {
EXT4_FC_STATUS_OK = 0,
EXT4_FC_STATUS_INELIGIBLE,
EXT4_FC_STATUS_SKIPPED,
EXT4_FC_STATUS_FAILED,
};
/*
* Fast commit ineligiblity reasons:
*/
enum {
/*
* Commit status codes:
*/
EXT4_FC_REASON_OK = 0,
EXT4_FC_REASON_INELIGIBLE,
EXT4_FC_REASON_ALREADY_COMMITTED,
EXT4_FC_REASON_FC_START_FAILED,
EXT4_FC_REASON_FC_FAILED,
/*
* Fast commit ineligiblity reasons:
*/
EXT4_FC_REASON_XATTR = 0,
EXT4_FC_REASON_CROSS_RENAME,
EXT4_FC_REASON_JOURNAL_FLAG_CHANGE,
......@@ -117,7 +115,10 @@ struct ext4_fc_stats {
unsigned int fc_ineligible_reason_count[EXT4_FC_REASON_MAX];
unsigned long fc_num_commits;
unsigned long fc_ineligible_commits;
unsigned long fc_failed_commits;
unsigned long fc_skipped_commits;
unsigned long fc_numblks;
u64 s_fc_avg_commit_time;
};
#define EXT4_FC_REPLAY_REALLOC_INCREMENT 4
......
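With the status codes split out from the ineligibility reasons, per-commit accounting can key off the status alone; roughly (stat field names as in the struct above, call site assumed):

/* assumed bookkeeping at the end of one fast commit attempt */
switch (status) {
case EXT4_FC_STATUS_OK:
	stats->fc_num_commits++;
	stats->fc_numblks += nblks;
	break;
case EXT4_FC_STATUS_INELIGIBLE:
	stats->fc_ineligible_commits++;
	break;
case EXT4_FC_STATUS_SKIPPED:
	stats->fc_skipped_commits++;
	break;
case EXT4_FC_STATUS_FAILED:
	stats->fc_failed_commits++;
	break;
}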
......@@ -259,7 +259,6 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_NOWAIT)
return -EOPNOTSUPP;
ext4_fc_start_update(inode);
inode_lock(inode);
ret = ext4_write_checks(iocb, from);
if (ret <= 0)
......@@ -271,7 +270,6 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
out:
inode_unlock(inode);
ext4_fc_stop_update(inode);
if (likely(ret > 0)) {
iocb->ki_pos += ret;
ret = generic_write_sync(iocb, ret);
......@@ -552,9 +550,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
ext4_fc_start_update(inode);
ret = ext4_orphan_add(handle, inode);
ext4_fc_stop_update(inode);
if (ret) {
ext4_journal_stop(handle);
goto out;
......
......@@ -741,10 +741,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
if (ret)
return ret;
}
ext4_fc_track_range(handle, inode, map->m_lblk,
map->m_lblk + map->m_len - 1);
}
if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
map->m_flags & EXT4_MAP_MAPPED))
ext4_fc_track_range(handle, inode, map->m_lblk,
map->m_lblk + map->m_len - 1);
if (retval < 0)
ext_debug(inode, "failed with err %d\n", retval);
return retval;
......@@ -1844,30 +1845,16 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
return 0;
}
static int bget_one(handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
get_bh(bh);
return 0;
}
static int bput_one(handle_t *handle, struct inode *inode,
struct buffer_head *bh)
{
put_bh(bh);
return 0;
}
static int __ext4_journalled_writepage(struct page *page,
unsigned int len)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct buffer_head *page_bufs = NULL;
handle_t *handle = NULL;
int ret = 0, err = 0;
int inline_data = ext4_has_inline_data(inode);
struct buffer_head *inode_bh = NULL;
loff_t size;
ClearPageChecked(page);
......@@ -1877,14 +1864,6 @@ static int __ext4_journalled_writepage(struct page *page,
inode_bh = ext4_journalled_write_inline_data(inode, len, page);
if (inode_bh == NULL)
goto out;
} else {
page_bufs = page_buffers(page);
if (!page_bufs) {
BUG();
goto out;
}
ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
NULL, bget_one);
}
/*
* We need to release the page lock before we start the
......@@ -1905,7 +1884,8 @@ static int __ext4_journalled_writepage(struct page *page,
lock_page(page);
put_page(page);
if (page->mapping != mapping) {
size = i_size_read(inode);
if (page->mapping != mapping || page_offset(page) > size) {
/* The page got truncated from under us */
ext4_journal_stop(handle);
ret = 0;
......@@ -1915,6 +1895,13 @@ static int __ext4_journalled_writepage(struct page *page,
if (inline_data) {
ret = ext4_mark_inode_dirty(handle, inode);
} else {
struct buffer_head *page_bufs = page_buffers(page);
if (page->index == size >> PAGE_SHIFT)
len = size & ~PAGE_MASK;
else
len = PAGE_SIZE;
ret = ext4_walk_page_buffers(handle, inode, page_bufs, 0, len,
NULL, do_journal_get_write_access);
......@@ -1935,9 +1922,6 @@ static int __ext4_journalled_writepage(struct page *page,
out:
unlock_page(page);
out_no_pagelock:
if (!inline_data && page_bufs)
ext4_walk_page_buffers(NULL, inode, page_bufs, 0, len,
NULL, bput_one);
brelse(inode_bh);
return ret;
}
......@@ -2257,7 +2241,6 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
io_end_vec->size += io_end_size;
io_end_size = 0;
err = mpage_process_page_bufs(mpd, head, bh, lblk);
if (err > 0)
......@@ -2282,7 +2265,6 @@ static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
} while (lblk++, (bh = bh->b_this_page) != head);
io_end_vec->size += io_end_size;
io_end_size = 0;
*map_bh = false;
out:
*m_lblk = lblk;
......@@ -4523,7 +4505,7 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
static int __ext4_get_inode_loc_noinmem(struct inode *inode,
struct ext4_iloc *iloc)
{
ext4_fsblk_t err_blk;
ext4_fsblk_t err_blk = 0;
int ret;
ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
......@@ -4538,7 +4520,7 @@ static int __ext4_get_inode_loc_noinmem(struct inode *inode,
int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
ext4_fsblk_t err_blk;
ext4_fsblk_t err_blk = 0;
int ret;
ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
......@@ -5320,7 +5302,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (error)
return error;
}
ext4_fc_start_update(inode);
if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
(ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
handle_t *handle;
......@@ -5344,7 +5326,6 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (error) {
ext4_journal_stop(handle);
ext4_fc_stop_update(inode);
return error;
}
/* Update corresponding info in inode so that everything is in
......@@ -5356,7 +5337,6 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
if (unlikely(error)) {
ext4_fc_stop_update(inode);
return error;
}
}
......@@ -5370,12 +5350,10 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (attr->ia_size > sbi->s_bitmap_maxbytes) {
ext4_fc_stop_update(inode);
return -EFBIG;
}
}
if (!S_ISREG(inode->i_mode)) {
ext4_fc_stop_update(inode);
return -EINVAL;
}
......@@ -5427,8 +5405,7 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
ext4_fc_track_range(handle, inode,
(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
inode->i_sb->s_blocksize_bits,
(oldsize > 0 ? oldsize - 1 : 0) >>
inode->i_sb->s_blocksize_bits);
EXT_MAX_BLOCKS - 1);
else
ext4_fc_track_range(
handle, inode,
......@@ -5499,7 +5476,6 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
ext4_std_error(inode->i_sb, error);
if (!error)
error = rc;
ext4_fc_stop_update(inode);
return error;
}
......
......@@ -4814,7 +4814,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
*/
static noinline_for_stack int
ext4_mb_discard_group_preallocations(struct super_block *sb,
ext4_group_t group, int needed)
ext4_group_t group, int *busy)
{
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct buffer_head *bitmap_bh = NULL;
......@@ -4822,8 +4822,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
struct list_head list;
struct ext4_buddy e4b;
int err;
int busy = 0;
int free, free_total = 0;
int free = 0;
mb_debug(sb, "discard preallocation for group %u\n", group);
if (list_empty(&grp->bb_prealloc_list))
......@@ -4846,19 +4845,14 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
goto out_dbg;
}
if (needed == 0)
needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
INIT_LIST_HEAD(&list);
repeat:
free = 0;
ext4_lock_group(sb, group);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
spin_lock(&pa->pa_lock);
if (atomic_read(&pa->pa_count)) {
spin_unlock(&pa->pa_lock);
busy = 1;
*busy = 1;
continue;
}
if (pa->pa_deleted) {
......@@ -4898,22 +4892,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
free_total += free;
/* if we still need more blocks and some PAs were used, try again */
if (free_total < needed && busy) {
ext4_unlock_group(sb, group);
cond_resched();
busy = 0;
goto repeat;
}
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
out_dbg:
mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
free_total, group, grp->bb_free);
return free_total;
free, group, grp->bb_free);
return free;
}
/*
......@@ -5455,13 +5440,24 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
{
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
int ret;
int freed = 0;
int freed = 0, busy = 0;
int retry = 0;
trace_ext4_mb_discard_preallocations(sb, needed);
if (needed == 0)
needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
repeat:
for (i = 0; i < ngroups && needed > 0; i++) {
ret = ext4_mb_discard_group_preallocations(sb, i, needed);
ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
freed += ret;
needed -= ret;
cond_resched();
}
if (needed > 0 && busy && ++retry < 3) {
busy = 0;
goto repeat;
}
return freed;
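The busy-PA retry now happens across the whole group walk instead of inside a single group, which is how the possible ABBA deadlock due to a busy PA noted in the commit list is avoided. Condensed restatement of the new control flow (simplified, freed accounting omitted):

repeat:
	for (i = 0; i < ngroups && needed > 0; i++) {
		needed -= ext4_mb_discard_group_preallocations(sb, i, &busy);
		cond_resched();
	}
	if (needed > 0 && busy && ++retry < 3) {
		busy = 0;
		goto repeat;
	}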
......@@ -6373,7 +6369,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
ext4_lock_group(sb, group);
if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
minblocks < atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) {
minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
if (ret >= 0)
EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
......@@ -6404,6 +6400,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
*/
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
struct request_queue *q = bdev_get_queue(sb->s_bdev);
struct ext4_group_info *grp;
ext4_group_t group, first_group, last_group;
ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
......@@ -6422,6 +6419,13 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
start >= max_blks ||
range->len < sb->s_blocksize)
return -EINVAL;
/* No point to try to trim less than discard granularity */
if (range->minlen < q->limits.discard_granularity) {
minlen = EXT4_NUM_B2C(EXT4_SB(sb),
q->limits.discard_granularity >> sb->s_blocksize_bits);
if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
goto out;
}
if (end >= max_blks)
end = max_blks - 1;
if (end <= first_data_blk)
......@@ -6474,7 +6478,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
}
if (!ret)
atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
EXT4_SB(sb)->s_last_trim_minblks = minlen;
out:
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
......
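The new minlen clamp relates directly to what userspace passes in via FITRIM; a minimal fstrim-style sketch (mount point assumed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range = {
		.start  = 0,
		.len    = UINT64_MAX,	/* whole filesystem */
		.minlen = 0,		/* the kernel rounds this up to the
					 * device's discard granularity */
	};
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FITRIM, &range) == 0)
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}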
......@@ -437,12 +437,12 @@ int ext4_ext_migrate(struct inode *inode)
percpu_down_write(&sbi->s_writepages_rwsem);
/*
* Worst case we can touch the allocation bitmaps, a bgd
* block, and a block to link in the orphan list. We do need
* need to worry about credits for modifying the quota inode.
* Worst case we can touch the allocation bitmaps and a block
* group descriptor block. We do need need to worry about
* credits for modifying the quota inode.
*/
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
......@@ -459,6 +459,13 @@ int ext4_ext_migrate(struct inode *inode)
ext4_journal_stop(handle);
goto out_unlock;
}
/*
* Use the correct seed for checksum (i.e. the seed from 'inode'). This
* is so that the metadata blocks will have the correct checksum after
* the migration.
*/
ei = EXT4_I(inode);
EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
i_size_write(tmp_inode, i_size_read(inode));
/*
* Set the i_nlink to zero so it will be deleted later
......@@ -467,7 +474,6 @@ int ext4_ext_migrate(struct inode *inode)
clear_nlink(tmp_inode);
ext4_ext_tree_init(handle, tmp_inode);
ext4_orphan_add(handle, tmp_inode);
ext4_journal_stop(handle);
/*
......@@ -492,17 +498,10 @@ int ext4_ext_migrate(struct inode *inode)
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
if (IS_ERR(handle)) {
/*
* It is impossible to update on-disk structures without
* a handle, so just rollback in-core changes and live other
* work to orphan_list_cleanup()
*/
ext4_orphan_del(NULL, tmp_inode);
retval = PTR_ERR(handle);
goto out_tmp_inode;
}
ei = EXT4_I(inode);
i_data = ei->i_data;
memset(&lb, 0, sizeof(lb));
......
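For reference on why the seed copy above matters: extent tree blocks are checksummed with the owning inode's i_csum_seed, which ext4 normally derives from the inode number and generation roughly as below (the existing scheme elsewhere in ext4, shown only for context); a tmp inode with its own seed would leave stale checksums once its extent tree is swapped into the original inode.

/* sketch of how i_csum_seed is normally derived (existing ext4 code path) */
__le32 inum = cpu_to_le32(inode->i_ino);
__le32 gen = raw_inode->i_generation;
__u32 csum;

csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));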
......@@ -632,7 +632,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
/* Check hole before the start pos */
if (cur_blk + cur_len - 1 < o_start) {
if (next_blk == EXT_MAX_BLOCKS) {
o_start = o_end;
ret = -ENODATA;
goto out;
}
......
......@@ -717,12 +717,23 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
* sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
* For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
*/
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
unsigned *five, unsigned *seven)
unsigned int ext4_list_backups(struct super_block *sb, unsigned int *three,
unsigned int *five, unsigned int *seven)
{
unsigned *min = three;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
unsigned int *min = three;
int mult = 3;
unsigned ret;
unsigned int ret;
if (ext4_has_feature_sparse_super2(sb)) {
do {
if (*min > 2)
return UINT_MAX;
ret = le32_to_cpu(es->s_backup_bgs[*min - 1]);
*min += 1;
} while (!ret);
return ret;
}
if (!ext4_has_feature_sparse_super(sb)) {
ret = *min;
......
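With ext4_list_backups() now declared in ext4.h (see the prototype added earlier in this diff) and aware of sparse_super2, a caller can walk every group that carries a backup superblock roughly like this (loop shape assumed, update_backup() is a placeholder):

unsigned int three = 1, five = 5, seven = 7;
unsigned int group;
ext4_group_t ngroups = ext4_get_groups_count(sb);

/* with sparse_super2 only the (at most two) groups recorded in
 * es->s_backup_bgs[] are returned; otherwise powers of 3, 5 and 7 */
for (group = ext4_list_backups(sb, &three, &five, &seven);
     group < ngroups;
     group = ext4_list_backups(sb, &three, &five, &seven))
	update_backup(sb, group);	/* placeholder helper */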
......@@ -63,7 +63,7 @@ static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
return snprintf(buf, PAGE_SIZE, "%lu\n",
return sysfs_emit(buf, "%lu\n",
(part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
sbi->s_sectors_written_start) >> 1);
}
......@@ -72,7 +72,7 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
return snprintf(buf, PAGE_SIZE, "%llu\n",
return sysfs_emit(buf, "%llu\n",
(unsigned long long)(sbi->s_kbytes_written +
((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1)));
......@@ -130,8 +130,8 @@ static ssize_t trigger_test_error(struct ext4_sb_info *sbi,
static ssize_t journal_task_show(struct ext4_sb_info *sbi, char *buf)
{
if (!sbi->s_journal)
return snprintf(buf, PAGE_SIZE, "<none>\n");
return snprintf(buf, PAGE_SIZE, "%d\n",
return sysfs_emit(buf, "<none>\n");
return sysfs_emit(buf, "%d\n",
task_pid_vnr(sbi->s_journal->j_task));
}
......@@ -245,6 +245,7 @@ EXT4_ATTR(last_error_time, 0444, last_error_time);
EXT4_ATTR(journal_task, 0444, journal_task);
EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch);
EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit);
EXT4_RW_ATTR_SBI_UL(last_trim_minblks, s_last_trim_minblks);
static unsigned int old_bump_val = 128;
EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val);
......@@ -295,6 +296,7 @@ static struct attribute *ext4_attrs[] = {
#endif
ATTR_LIST(mb_prefetch),
ATTR_LIST(mb_prefetch_limit),
ATTR_LIST(last_trim_minblks),
NULL,
};
ATTRIBUTE_GROUPS(ext4);
......@@ -357,7 +359,7 @@ static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi)
static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
{
return snprintf(buf, PAGE_SIZE, "%lld\n",
return sysfs_emit(buf, "%lld\n",
((time64_t)hi << 32) + le32_to_cpu(lo));
}
......@@ -374,7 +376,7 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
switch (a->attr_id) {
case attr_delayed_allocation_blocks:
return snprintf(buf, PAGE_SIZE, "%llu\n",
return sysfs_emit(buf, "%llu\n",
(s64) EXT4_C2B(sbi,
percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
case attr_session_write_kbytes:
......@@ -382,11 +384,11 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
case attr_lifetime_write_kbytes:
return lifetime_write_kbytes_show(sbi, buf);
case attr_reserved_clusters:
return snprintf(buf, PAGE_SIZE, "%llu\n",
return sysfs_emit(buf, "%llu\n",
(unsigned long long)
atomic64_read(&sbi->s_resv_clusters));
case attr_sra_exceeded_retry_limit:
return snprintf(buf, PAGE_SIZE, "%llu\n",
return sysfs_emit(buf, "%llu\n",
(unsigned long long)
percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
case attr_inode_readahead:
......@@ -394,42 +396,42 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
if (!ptr)
return 0;
if (a->attr_ptr == ptr_ext4_super_block_offset)
return snprintf(buf, PAGE_SIZE, "%u\n",
return sysfs_emit(buf, "%u\n",
le32_to_cpup(ptr));
else
return snprintf(buf, PAGE_SIZE, "%u\n",
return sysfs_emit(buf, "%u\n",
*((unsigned int *) ptr));
case attr_pointer_ul:
if (!ptr)
return 0;
return snprintf(buf, PAGE_SIZE, "%lu\n",
return sysfs_emit(buf, "%lu\n",
*((unsigned long *) ptr));
case attr_pointer_u8:
if (!ptr)
return 0;
return snprintf(buf, PAGE_SIZE, "%u\n",
return sysfs_emit(buf, "%u\n",
*((unsigned char *) ptr));
case attr_pointer_u64:
if (!ptr)
return 0;
if (a->attr_ptr == ptr_ext4_super_block_offset)
return snprintf(buf, PAGE_SIZE, "%llu\n",
return sysfs_emit(buf, "%llu\n",
le64_to_cpup(ptr));
else
return snprintf(buf, PAGE_SIZE, "%llu\n",
return sysfs_emit(buf, "%llu\n",
*((unsigned long long *) ptr));
case attr_pointer_string:
if (!ptr)
return 0;
return snprintf(buf, PAGE_SIZE, "%.*s\n", a->attr_size,
return sysfs_emit(buf, "%.*s\n", a->attr_size,
(char *) ptr);
case attr_pointer_atomic:
if (!ptr)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n",
return sysfs_emit(buf, "%d\n",
atomic_read((atomic_t *) ptr));
case attr_feature:
return snprintf(buf, PAGE_SIZE, "supported\n");
return sysfs_emit(buf, "supported\n");
case attr_first_error_time:
return print_tstamp(buf, sbi->s_es, s_first_error_time);
case attr_last_error_time:
......
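The snprintf()-to-sysfs_emit() conversions above follow the sysfs convention that a ->show() callback writes into a single PAGE_SIZE buffer; sysfs_emit() encodes that assumption and returns the number of characters written. A generic, non-ext4 sketch:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	/* buf is the page-aligned PAGE_SIZE buffer sysfs passes in;
	 * sysfs_emit() checks that and formats into it */
	return sysfs_emit(buf, "%d\n", 42);
}
/* exposed by registering demo_attr.attr with sysfs_create_file() */
static struct kobj_attribute demo_attr = __ATTR_RO(demo);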
......@@ -199,6 +199,8 @@ int fs_param_is_bool(struct p_log *log, const struct fs_parameter_spec *p,
int b;
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
if (!*param->string && (p->flags & fs_param_can_be_empty))
return 0;
b = lookup_constant(bool_names, param->string, -1);
if (b == -1)
return fs_param_bad_value(log, param);
......@@ -211,8 +213,11 @@ int fs_param_is_u32(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
int base = (unsigned long)p->data;
if (param->type != fs_value_is_string ||
kstrtouint(param->string, base, &result->uint_32) < 0)
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
if (!*param->string && (p->flags & fs_param_can_be_empty))
return 0;
if (kstrtouint(param->string, base, &result->uint_32) < 0)
return fs_param_bad_value(log, param);
return 0;
}
......@@ -221,8 +226,11 @@ EXPORT_SYMBOL(fs_param_is_u32);
int fs_param_is_s32(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
if (param->type != fs_value_is_string ||
kstrtoint(param->string, 0, &result->int_32) < 0)
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
if (!*param->string && (p->flags & fs_param_can_be_empty))
return 0;
if (kstrtoint(param->string, 0, &result->int_32) < 0)
return fs_param_bad_value(log, param);
return 0;
}
......@@ -231,8 +239,11 @@ EXPORT_SYMBOL(fs_param_is_s32);
int fs_param_is_u64(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
if (param->type != fs_value_is_string ||
kstrtoull(param->string, 0, &result->uint_64) < 0)
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
if (!*param->string && (p->flags & fs_param_can_be_empty))
return 0;
if (kstrtoull(param->string, 0, &result->uint_64) < 0)
return fs_param_bad_value(log, param);
return 0;
}
......@@ -244,6 +255,8 @@ int fs_param_is_enum(struct p_log *log, const struct fs_parameter_spec *p,
const struct constant_table *c;
if (param->type != fs_value_is_string)
return fs_param_bad_value(log, param);
if (!*param->string && (p->flags & fs_param_can_be_empty))
return 0;
c = __lookup_constant(p->data, param->string);
if (!c)
return fs_param_bad_value(log, param);
......@@ -255,7 +268,8 @@ EXPORT_SYMBOL(fs_param_is_enum);
int fs_param_is_string(struct p_log *log, const struct fs_parameter_spec *p,
struct fs_parameter *param, struct fs_parse_result *result)
{
if (param->type != fs_value_is_string || !*param->string)
if (param->type != fs_value_is_string ||
(!*param->string && !(p->flags & fs_param_can_be_empty)))
return fs_param_bad_value(log, param);
return 0;
}
......@@ -275,7 +289,8 @@ int fs_param_is_fd(struct p_log *log, const struct fs_parameter_spec *p,
{
switch (param->type) {
case fs_value_is_string:
if (kstrtouint(param->string, 0, &result->uint_32) < 0)
if ((!*param->string && !(p->flags & fs_param_can_be_empty)) ||
kstrtouint(param->string, 0, &result->uint_32) < 0)
break;
if (result->uint_32 <= INT_MAX)
return 0;
......
......@@ -757,6 +757,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid)
}
journal->j_flags |= JBD2_FAST_COMMIT_ONGOING;
write_unlock(&journal->j_state_lock);
jbd2_journal_lock_updates(journal);
return 0;
}
......@@ -768,6 +769,7 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit);
*/
static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
jbd2_journal_unlock_updates(journal);
if (journal->j_fc_cleanup_callback)
journal->j_fc_cleanup_callback(journal, 0);
write_lock(&journal->j_state_lock);
......
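This jbd2 change is the other half of the ext4_fc_start_update()/ext4_fc_stop_update() removals seen in acl.c, extents.c, file.c and inode.c above: rather than every caller bracketing its own update, jbd2_fc_begin_commit() now blocks new handles for the duration of the fast commit. In outline (simplified from the hunk above):

jbd2_journal_lock_updates(journal);	/* drain running handles, block new ones */
/* ... write the fast commit blocks ... */
jbd2_journal_unlock_updates(journal);	/* done from __jbd2_fc_end_commit() */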
......@@ -42,7 +42,7 @@ struct fs_parameter_spec {
u8 opt; /* Option number (returned by fs_parse()) */
unsigned short flags;
#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */
#define fs_param_can_be_empty 0x0004 /* "xxx=" is allowed */
#define fs_param_deprecated 0x0008 /* The param is deprecated */
const void *data;
};
......
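With the flag renamed to fs_param_can_be_empty and honoured by the typed parsers above, a filesystem that wants "key=" accepted (ext4 uses this for options such as usrjquota=, where an empty value clears the setting) marks its spec roughly like this (table entry illustrative, Opt_usrjquota is an assumed enum constant):

static const struct fs_parameter_spec demo_param_specs[] = {
	/* empty value allowed: "usrjquota=" clears the quota file name */
	__fsparam(fs_param_is_string, "usrjquota", Opt_usrjquota,
		  fs_param_can_be_empty, NULL),
	{}
};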
......@@ -2837,6 +2837,29 @@ TRACE_EVENT(ext4_fc_track_range,
__entry->end)
);
TRACE_EVENT(ext4_update_sb,
TP_PROTO(struct super_block *sb, ext4_fsblk_t fsblk,
unsigned int flags),
TP_ARGS(sb, fsblk, flags),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ext4_fsblk_t, fsblk)
__field(unsigned int, flags)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->fsblk = fsblk;
__entry->flags = flags;
),
TP_printk("dev %d,%d fsblk %llu flags %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->fsblk, __entry->flags)
);
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
......
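TRACE_EVENT(ext4_update_sb) generates a trace_ext4_update_sb() helper; the superblock-update path would emit it roughly as below (call site and arguments assumed):

/* assumed call site when a superblock buffer is scheduled for writeout */
trace_ext4_update_sb(sb, bh->b_blocknr, flags);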