Commit be1158cc authored by Theodore Ts'o

jbd2: fold __process_buffer() into jbd2_log_do_checkpoint()

__process_buffer() is only called by jbd2_log_do_checkpoint(), and it
had a very complex locking protocol where it would be called with the
j_list_lock, and sometimes exit with the lock held (if the return code
was 0), or release the lock.

This was confusing both to humans and to smatch (which erroneously
complained that the lock was taken twice).

Folding __process_buffer() to the caller allows us to simplify the
control flow, making the resulting function easier to read and reason
about, and dropping the compiled size of fs/jbd2/checkpoint.c by 150
bytes (over 4% of the text size).
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
parent ed8a1a76
No related merge requests found
...@@ -254,81 +254,6 @@ __flush_batch(journal_t *journal, int *batch_count) ...@@ -254,81 +254,6 @@ __flush_batch(journal_t *journal, int *batch_count)
*batch_count = 0; *batch_count = 0;
} }
/*
 * Try to flush one buffer from the checkpoint list to disk.
 *
 * Return 1 if something happened which requires us to abort the current
 * scan of the checkpoint list. Return <0 if the buffer has failed to
 * be written out.
 *
 * Called with j_list_lock held.  The lock is dropped before any nonzero
 * return (both the ret == 1 cases and the -EIO case below); it is only
 * still held on return when 0 is returned (buffer queued into the batch
 * without the batch becoming full).
 */
static int __process_buffer(journal_t *journal, struct journal_head *jh,
int *batch_count, transaction_t *transaction)
{
struct buffer_head *bh = jh2bh(jh);
int ret = 0;
/*
 * Buffer is under I/O: take a reference so it cannot go away, drop
 * the list lock, and wait for the I/O to complete.  The caller must
 * restart its scan afterwards (ret = 1) because the checkpoint list
 * may have changed while the lock was dropped.
 */
if (buffer_locked(bh)) {
get_bh(bh);
spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
ret = 1;
} else if (jh->b_transaction != NULL) {
/*
 * Buffer still belongs to a running/committing transaction:
 * we cannot checkpoint it ourselves, so force that transaction
 * to commit and wait for it, then restart the scan.
 */
transaction_t *t = jh->b_transaction;
tid_t tid = t->t_tid;
transaction->t_chp_stats.cs_forced_to_close++;
spin_unlock(&journal->j_list_lock);
if (unlikely(journal->j_flags & JBD2_UNMOUNT))
/*
 * The journal thread is dead; so starting and
 * waiting for a commit to finish will cause
 * us to wait for a _very_ long time.
 */
printk(KERN_ERR "JBD2: %s: "
"Waiting for Godot: block %llu\n",
journal->j_devname,
(unsigned long long) bh->b_blocknr);
jbd2_log_start_commit(journal, tid);
jbd2_log_wait_commit(journal, tid);
ret = 1;
} else if (!buffer_dirty(bh)) {
/*
 * Buffer is clean: it is already on disk, so it can simply be
 * removed from the checkpoint list.  A recorded write I/O error
 * is propagated as -EIO; note the lock is dropped here too, so
 * the caller must not assume it still holds j_list_lock on a
 * negative return.
 */
ret = 1;
if (unlikely(buffer_write_io_error(bh)))
ret = -EIO;
get_bh(bh);
BUFFER_TRACE(bh, "remove from checkpoint");
__jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
__brelse(bh);
} else {
/*
 * Important: we are about to write the buffer, and
 * possibly block, while still holding the journal lock.
 * We cannot afford to let the transaction logic start
 * messing around with this buffer before we write it to
 * disk, as that would break recoverability.
 */
BUFFER_TRACE(bh, "queue");
get_bh(bh);
J_ASSERT_BH(bh, !buffer_jwrite(bh));
journal->j_chkpt_bhs[*batch_count] = bh;
__buffer_relink_io(jh);
transaction->t_chp_stats.cs_written++;
(*batch_count)++;
/*
 * Batch is full: drop the lock and flush it.  ret = 1 tells the
 * caller to restart, since the list may change during the flush.
 */
if (*batch_count == JBD2_NR_BATCH) {
spin_unlock(&journal->j_list_lock);
__flush_batch(journal, batch_count);
ret = 1;
}
}
return ret;
}
/* /*
* Perform an actual checkpoint. We take the first transaction on the * Perform an actual checkpoint. We take the first transaction on the
* list of transactions to be checkpointed and send all its buffers * list of transactions to be checkpointed and send all its buffers
...@@ -339,9 +264,11 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, ...@@ -339,9 +264,11 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
*/ */
int jbd2_log_do_checkpoint(journal_t *journal) int jbd2_log_do_checkpoint(journal_t *journal)
{ {
struct journal_head *jh;
struct buffer_head *bh;
transaction_t *transaction; transaction_t *transaction;
tid_t this_tid; tid_t this_tid;
int result; int err, result, batch_count = 0;
jbd_debug(1, "Start checkpoint\n"); jbd_debug(1, "Start checkpoint\n");
...@@ -374,46 +301,92 @@ int jbd2_log_do_checkpoint(journal_t *journal) ...@@ -374,46 +301,92 @@ int jbd2_log_do_checkpoint(journal_t *journal)
* done (maybe it's a new transaction, but it fell at the same * done (maybe it's a new transaction, but it fell at the same
* address). * address).
*/ */
if (journal->j_checkpoint_transactions == transaction && if (journal->j_checkpoint_transactions != transaction ||
transaction->t_tid == this_tid) { transaction->t_tid != this_tid)
int batch_count = 0; goto out;
struct journal_head *jh;
int retry = 0, err;
while (!retry && transaction->t_checkpoint_list) { /* checkpoint all of the transaction's buffers */
while (transaction->t_checkpoint_list) {
jh = transaction->t_checkpoint_list; jh = transaction->t_checkpoint_list;
retry = __process_buffer(journal, jh, &batch_count, bh = jh2bh(jh);
transaction);
if (retry < 0 && !result) if (buffer_locked(bh)) {
result = retry;
if (!retry && (need_resched() ||
spin_needbreak(&journal->j_list_lock))) {
spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_list_lock);
retry = 1; get_bh(bh);
break; wait_on_buffer(bh);
} /* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
goto retry;
} }
if (jh->b_transaction != NULL) {
transaction_t *t = jh->b_transaction;
tid_t tid = t->t_tid;
if (batch_count) { transaction->t_chp_stats.cs_forced_to_close++;
if (!retry) { spin_unlock(&journal->j_list_lock);
if (unlikely(journal->j_flags & JBD2_UNMOUNT))
/*
* The journal thread is dead; so
* starting and waiting for a commit
* to finish will cause us to wait for
* a _very_ long time.
*/
printk(KERN_ERR
"JBD2: %s: Waiting for Godot: block %llu\n",
journal->j_devname, (unsigned long long) bh->b_blocknr);
jbd2_log_start_commit(journal, tid);
jbd2_log_wait_commit(journal, tid);
goto retry;
}
if (!buffer_dirty(bh)) {
if (unlikely(buffer_write_io_error(bh)) && !result)
result = -EIO;
get_bh(bh);
BUFFER_TRACE(bh, "remove from checkpoint");
__jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_list_lock);
retry = 1; __brelse(bh);
goto retry;
} }
__flush_batch(journal, &batch_count); /*
* Important: we are about to write the buffer, and
* possibly block, while still holding the journal
* lock. We cannot afford to let the transaction
* logic start messing around with this buffer before
* we write it to disk, as that would break
* recoverability.
*/
BUFFER_TRACE(bh, "queue");
get_bh(bh);
J_ASSERT_BH(bh, !buffer_jwrite(bh));
journal->j_chkpt_bhs[batch_count++] = bh;
__buffer_relink_io(jh);
transaction->t_chp_stats.cs_written++;
if ((batch_count == JBD2_NR_BATCH) ||
need_resched() ||
spin_needbreak(&journal->j_list_lock))
goto unlock_and_flush;
} }
if (retry) { if (batch_count) {
unlock_and_flush:
spin_unlock(&journal->j_list_lock);
retry:
if (batch_count)
__flush_batch(journal, &batch_count);
spin_lock(&journal->j_list_lock); spin_lock(&journal->j_list_lock);
goto restart; goto restart;
} }
/* /*
* Now we have cleaned up the first transaction's checkpoint * Now we issued all of the transaction's buffers, let's deal
* list. Let's clean up the second one * with the buffers that are out for I/O.
*/ */
err = __wait_cp_io(journal, transaction); err = __wait_cp_io(journal, transaction);
if (!result) if (!result)
result = err; result = err;
}
out: out:
spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_list_lock);
if (result < 0) if (result < 0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment