Commit 15b593ba authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'ext4_for_linus-6.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "Bug and regression fixes for 6.5-rc3 for ext4's mballoc and jbd2's
  checkpoint code"

* tag 'ext4_for_linus-6.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix rbtree traversal bug in ext4_mb_use_preallocated
  ext4: fix off by one issue in ext4_mb_choose_next_group_best_avail()
  ext4: correct inline offset when handling xattrs in inode body
  jbd2: remove __journal_try_to_free_buffer()
  jbd2: fix a race when checking checkpoint buffer busy
  jbd2: Fix wrongly judgement for buffer head removing while doing checkpoint
  jbd2: remove journal_clean_one_cp_list()
  jbd2: remove t_checkpoint_io_list
  jbd2: recheck checkpointing non-dirty buffer
parents 8266f53b 9d3de7ee
......@@ -1006,14 +1006,11 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
* fls() instead since we need to know the actual length while modifying
* goal length.
*/
order = fls(ac->ac_g_ex.fe_len);
order = fls(ac->ac_g_ex.fe_len) - 1;
min_order = order - sbi->s_mb_best_avail_max_trim_order;
if (min_order < 0)
min_order = 0;
if (1 << min_order < ac->ac_o_ex.fe_len)
min_order = fls(ac->ac_o_ex.fe_len) + 1;
if (sbi->s_stripe > 0) {
/*
* We are assuming that stripe size is always a multiple of
......@@ -1021,9 +1018,16 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
*/
num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe);
if (1 << min_order < num_stripe_clusters)
min_order = fls(num_stripe_clusters);
/*
* We consider 1 order less because later we round
* up the goal len to num_stripe_clusters
*/
min_order = fls(num_stripe_clusters) - 1;
}
if (1 << min_order < ac->ac_o_ex.fe_len)
min_order = fls(ac->ac_o_ex.fe_len);
for (i = order; i >= min_order; i--) {
int frag_order;
/*
......@@ -4761,8 +4765,8 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
int order, i;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_locality_group *lg;
struct ext4_prealloc_space *tmp_pa, *cpa = NULL;
ext4_lblk_t tmp_pa_start, tmp_pa_end;
struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
loff_t tmp_pa_end;
struct rb_node *iter;
ext4_fsblk_t goal_block;
......@@ -4770,47 +4774,151 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
return false;
/* first, try per-file preallocation */
/*
* first, try per-file preallocation by searching the inode pa rbtree.
*
* Here, we can't do a direct traversal of the tree because
* ext4_mb_discard_group_preallocation() can concurrently mark the pa
* deleted and that can cause direct traversal to skip some entries.
*/
read_lock(&ei->i_prealloc_lock);
if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
goto try_group_pa;
}
/*
* Step 1: Find a pa with logical start immediately adjacent to the
* original logical start. This could be on the left or right.
*
* (tmp_pa->pa_lstart never changes so we can skip locking for it).
*/
for (iter = ei->i_prealloc_node.rb_node; iter;
iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
tmp_pa_start, iter)) {
tmp_pa->pa_lstart, iter)) {
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
}
/* all fields in this condition don't change,
* so we can skip locking for them */
tmp_pa_start = tmp_pa->pa_lstart;
tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
/*
* Step 2: The adjacent pa might be to the right of logical start, find
* the left adjacent pa. After this step we'd have a valid tmp_pa whose
* logical start is towards the left of original request's logical start
*/
if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
struct rb_node *tmp;
tmp = rb_prev(&tmp_pa->pa_node.inode_node);
/* original request start doesn't lie in this PA */
if (ac->ac_o_ex.fe_logical < tmp_pa_start ||
ac->ac_o_ex.fe_logical >= tmp_pa_end)
continue;
if (tmp) {
tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
pa_node.inode_node);
} else {
/*
* If there is no adjacent pa to the left then finding
* an overlapping pa is not possible hence stop searching
* inode pa tree
*/
goto try_group_pa;
}
}
BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
/*
* Step 3: If the left adjacent pa is deleted, keep moving left to find
* the first non deleted adjacent pa. After this step we should have a
* valid tmp_pa which is guaranteed to be non deleted.
*/
for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
if (!iter) {
/*
* no non deleted left adjacent pa, so stop searching
* inode pa tree
*/
goto try_group_pa;
}
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted == 0) {
/*
* We will keep holding the pa_lock from
* this point on because we don't want group discard
* to delete this pa underneath us. Since group
* discard is anyways an ENOSPC operation it
* should be okay for it to wait a few more cycles.
*/
break;
} else {
spin_unlock(&tmp_pa->pa_lock);
}
}
BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
BUG_ON(tmp_pa->pa_deleted == 1);
/*
* Step 4: We now have the non deleted left adjacent pa. Only this
* pa can possibly satisfy the request hence check if it overlaps
* original logical start and stop searching if it doesn't.
*/
tmp_pa_end = (loff_t)tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
if (ac->ac_o_ex.fe_logical >= tmp_pa_end) {
spin_unlock(&tmp_pa->pa_lock);
goto try_group_pa;
}
/* non-extent files can't have physical blocks past 2^32 */
if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
(tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
EXT4_MAX_BLOCK_FILE_PHYS)) {
/*
* Since PAs don't overlap, we won't find any
* other PA to satisfy this.
* Since PAs don't overlap, we won't find any other PA to
* satisfy this.
*/
break;
spin_unlock(&tmp_pa->pa_lock);
goto try_group_pa;
}
/* found preallocated blocks, use them */
spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free &&
likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
atomic_inc(&tmp_pa->pa_count);
ext4_mb_use_inode_pa(ac, tmp_pa);
spin_unlock(&tmp_pa->pa_lock);
read_unlock(&ei->i_prealloc_lock);
return true;
} else {
/*
* We found a valid overlapping pa but couldn't use it because
* it had no free blocks. This should ideally never happen
* because:
*
* 1. When a new inode pa is added to rbtree it must have
* pa_free > 0 since otherwise we won't actually need
* preallocation.
*
* 2. An inode pa that is in the rbtree can only have it's
* pa_free become zero when another thread calls:
* ext4_mb_new_blocks
* ext4_mb_use_preallocated
* ext4_mb_use_inode_pa
*
* 3. Further, after the above calls make pa_free == 0, we will
* immediately remove it from the rbtree in:
* ext4_mb_new_blocks
* ext4_mb_release_context
* ext4_mb_put_pa
*
* 4. Since the pa_free becoming 0 and pa_free getting removed
* from tree both happen in ext4_mb_new_blocks, which is always
* called with i_data_sem held for data allocations, we can be
* sure that another process will never see a pa in rbtree with
* pa_free == 0.
*/
WARN_ON_ONCE(tmp_pa->pa_free == 0);
}
spin_unlock(&tmp_pa->pa_lock);
}
try_group_pa:
read_unlock(&ei->i_prealloc_lock);
/* can we use group allocation? */
......
......@@ -1782,6 +1782,20 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
memmove(here, (void *)here + size,
(void *)last - (void *)here + sizeof(__u32));
memset(last, 0, size);
/*
* Update i_inline_off - moved ibody region might contain
* system.data attribute. Handling a failure here won't
* cause other complications for setting an xattr.
*/
if (!is_block && ext4_has_inline_data(inode)) {
ret = ext4_find_inline_data_nolock(inode);
if (ret) {
ext4_warning_inode(inode,
"unable to update i_inline_off");
goto out;
}
}
} else if (s->not_found) {
/* Insert new name. */
size_t size = EXT4_XATTR_LEN(name_len);
......
This diff is collapsed.
......@@ -1141,8 +1141,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
spin_lock(&journal->j_list_lock);
commit_transaction->t_state = T_FINISHED;
/* Check if the transaction can be dropped now that we are finished */
if (commit_transaction->t_checkpoint_list == NULL &&
commit_transaction->t_checkpoint_io_list == NULL) {
if (commit_transaction->t_checkpoint_list == NULL) {
__jbd2_journal_drop_transaction(journal, commit_transaction);
jbd2_journal_free_transaction(commit_transaction);
}
......
......@@ -1784,8 +1784,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
* Otherwise, if the buffer has been written to disk,
* it is safe to remove the checkpoint and drop it.
*/
if (!buffer_dirty(bh)) {
__jbd2_journal_remove_checkpoint(jh);
if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
spin_unlock(&journal->j_list_lock);
goto drop;
}
......@@ -2100,35 +2099,6 @@ void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
__brelse(bh);
}
/*
* Called from jbd2_journal_try_to_free_buffers().
*
* Called under jh->b_state_lock
*/
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
struct journal_head *jh;
jh = bh2jh(bh);
if (buffer_locked(bh) || buffer_dirty(bh))
goto out;
if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
goto out;
spin_lock(&journal->j_list_lock);
if (jh->b_cp_transaction != NULL) {
/* written-back checkpointed metadata buffer */
JBUFFER_TRACE(jh, "remove from checkpoint list");
__jbd2_journal_remove_checkpoint(jh);
}
spin_unlock(&journal->j_list_lock);
out:
return;
}
/**
* jbd2_journal_try_to_free_buffers() - try to free page buffers.
* @journal: journal for operation
......@@ -2186,7 +2156,13 @@ bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
continue;
spin_lock(&jh->b_state_lock);
__journal_try_to_free_buffer(journal, bh);
if (!jh->b_transaction && !jh->b_next_transaction) {
spin_lock(&journal->j_list_lock);
/* Remove written-back checkpointed metadata buffer */
if (jh->b_cp_transaction != NULL)
jbd2_journal_try_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
}
spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
if (buffer_jbd(bh))
......
......@@ -613,12 +613,6 @@ struct transaction_s
*/
struct journal_head *t_checkpoint_list;
/*
* Doubly-linked circular list of all buffers submitted for IO while
* checkpointing. [j_list_lock]
*/
struct journal_head *t_checkpoint_io_list;
/*
* Doubly-linked circular list of metadata buffers being
* shadowed by log IO. The IO buffers on the iobuf list and
......@@ -1449,6 +1443,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
......
......@@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit,
TRACE_EVENT(jbd2_shrink_checkpoint_list,
TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
unsigned long nr_freed, unsigned long nr_scanned,
tid_t next_tid),
unsigned long nr_freed, tid_t next_tid),
TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
nr_scanned, next_tid),
TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
TP_STRUCT__entry(
__field(dev_t, dev)
......@@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
__field(tid_t, tid)
__field(tid_t, last_tid)
__field(unsigned long, nr_freed)
__field(unsigned long, nr_scanned)
__field(tid_t, next_tid)
),
......@@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
__entry->tid = tid;
__entry->last_tid = last_tid;
__entry->nr_freed = nr_freed;
__entry->nr_scanned = nr_scanned;
__entry->next_tid = next_tid;
),
TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
"scanned %lu next transaction %u",
"next transaction %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->first_tid, __entry->tid, __entry->last_tid,
__entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
__entry->nr_freed, __entry->next_tid)
);
#endif /* _TRACE_JBD2_H */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment