Commit 1704566f authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ext3: clean up journal_try_to_free_buffers()

Clean up ext3's journal_try_to_free_buffers().  Now that the
releasepage() a_op is non-blocking and need not perform I/O, this
function becomes much simpler.
parent 9d8e6506
@@ -1601,8 +1601,7 @@ void journal_unfile_buffer(struct journal_head *jh)
  *
  * Returns non-zero iff we were able to free the journal_head.
  */
-static int __journal_try_to_free_buffer(struct buffer_head *bh,
-					int *locked_or_dirty)
+static inline int __journal_try_to_free_buffer(struct buffer_head *bh)
 {
 	struct journal_head *jh;
@@ -1610,12 +1609,7 @@ static int __journal_try_to_free_buffer(struct buffer_head *bh,
 	jh = bh2jh(bh);
-	if (buffer_locked(bh) || buffer_dirty(bh)) {
-		*locked_or_dirty = 1;
-		goto out;
-	}
-	if (!buffer_uptodate(bh))	/* AKPM: why? */
+	if (buffer_locked(bh) || buffer_dirty(bh))
 		goto out;
 	if (jh->b_next_transaction != 0)
@@ -1630,8 +1624,7 @@ static int __journal_try_to_free_buffer(struct buffer_head *bh,
 			__journal_remove_journal_head(bh);
 			__brelse(bh);
 		}
-	}
-	else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
+	} else if (jh->b_cp_transaction != 0 && jh->b_transaction == 0) {
 		/* written-back checkpointed metadata buffer */
 		if (jh->b_jlist == BJ_None) {
 			JBUFFER_TRACE(jh, "remove from checkpoint list");
@@ -1647,10 +1640,8 @@ static int __journal_try_to_free_buffer(struct buffer_head *bh,
 }

 /*
- * journal_try_to_free_buffers().  For all the buffers on this page,
- * if they are fully written out ordered data, move them onto BUF_CLEAN
- * so try_to_free_buffers() can reap them.  Called with lru_list_lock
- * not held.  Does its own locking.
+ * journal_try_to_free_buffers().  Try to remove all this page's buffers
+ * from the journal.
  *
  * This complicates JBD locking somewhat.  We aren't protected by the
  * BKL here.  We wish to remove the buffer from its committing or
@@ -1669,50 +1660,28 @@ static int __journal_try_to_free_buffer(struct buffer_head *bh,
  * journal_try_to_free_buffer() is changing its state.  But that
  * cannot happen because we never reallocate freed data as metadata
  * while the data is part of a transaction.  Yes?
- *
- * This function returns non-zero if we wish try_to_free_buffers()
- * to be called.  We do this is the page is releasable by try_to_free_buffers().
- * We also do it if the page has locked or dirty buffers and the caller wants
- * us to perform sync or async writeout.
  */
 int journal_try_to_free_buffers(journal_t *journal,
-				struct page *page, int gfp_mask)
+				struct page *page, int unused_gfp_mask)
 {
+	struct buffer_head *head;
 	struct buffer_head *bh;
-	struct buffer_head *tmp;
-	int locked_or_dirty = 0;
-	int call_ttfb = 1;
-	int ret;
+	int ret = 0;

 	J_ASSERT(PageLocked(page));

-	bh = page_buffers(page);
-	tmp = bh;
+	head = page_buffers(page);
+	bh = head;
 	spin_lock(&journal_datalist_lock);
 	do {
-		struct buffer_head *p = tmp;
-
-		tmp = tmp->b_this_page;
-		if (buffer_jbd(p))
-			if (!__journal_try_to_free_buffer(p, &locked_or_dirty))
-				call_ttfb = 0;
-	} while (tmp != bh);
+		if (buffer_jbd(bh) && !__journal_try_to_free_buffer(bh)) {
+			spin_unlock(&journal_datalist_lock);
+			goto busy;
+		}
+	} while ((bh = bh->b_this_page) != head);
 	spin_unlock(&journal_datalist_lock);
-
-	if (!(gfp_mask & (__GFP_IO|__GFP_WAIT)))
-		goto out;
-	if (!locked_or_dirty)
-		goto out;
-	/*
-	 * The VM wants us to do writeout, or to block on IO, or both.
-	 * So we allow try_to_free_buffers to be called even if the page
-	 * still has journalled buffers.
-	 */
-	call_ttfb = 1;
-out:
-	ret = 0;
-	if (call_ttfb)
-		ret = try_to_free_buffers(page);
+	ret = try_to_free_buffers(page);
+busy:
 	return ret;
 }
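
For reference, below is a minimal user-space sketch of the control flow this patch leaves behind in journal_try_to_free_buffers(): walk the page's circular list of buffers once, bail out as soon as a journalled buffer cannot be freed, and only then try to free the page's buffers. The spinlock is omitted, and the struct and helper functions are simplified stand-ins invented for illustration; they are not the real buffer-cache or JBD interfaces.

/*
 * Stand-alone sketch of the post-patch loop shape.  Everything here is
 * a placeholder for illustration, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

struct buffer_head {
	struct buffer_head *b_this_page;	/* circular list of a page's buffers */
	bool jbd;				/* stand-in for buffer_jbd(bh) */
	bool busy;				/* stand-in for "locked, dirty or still journalled" */
};

/* Stand-in for __journal_try_to_free_buffer(): non-zero iff the buffer was freed. */
static int try_to_free_one(struct buffer_head *bh)
{
	return !bh->busy;
}

/* Stand-in for try_to_free_buffers(page): assume it succeeds here. */
static int free_page_buffers(void)
{
	return 1;
}

/*
 * Post-patch shape: scan the buffer ring once; if any journalled buffer
 * cannot be freed, give up immediately (ret stays 0); otherwise fall
 * through and free the page's buffers.
 */
static int journal_try_to_free_buffers_sketch(struct buffer_head *head)
{
	struct buffer_head *bh = head;
	int ret = 0;

	do {
		if (bh->jbd && !try_to_free_one(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);
	ret = free_page_buffers();
busy:
	return ret;
}

int main(void)
{
	/* Two buffers on one "page", linked in a circular list. */
	struct buffer_head a = { .jbd = true,  .busy = false };
	struct buffer_head b = { .jbd = false, .busy = false };

	a.b_this_page = &b;
	b.b_this_page = &a;

	printf("freeable: %d\n", journal_try_to_free_buffers_sketch(&a));	/* 1 */
	a.busy = true;
	printf("freeable: %d\n", journal_try_to_free_buffers_sketch(&a));	/* 0 */
	return 0;
}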