Commit f2a0b4d8 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] reiserfs: fsync() speedup

From: Chris Mason <mason@suse.com>

Updates the reiserfs-logging improvements to use schedule_timeout instead of
yield when letting the transaction grow a little before forcing a commit for
fsync/O_SYNC/O_DIRECT.
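
In outline, the old code busy-waited with yield() while other writers were still in the transaction; the new code sleeps for one tick so those writers get a real chance to add their blocks before the commit is forced. A minimal sketch of the pattern (simplified, not the literal patch; writers_active() is a hypothetical stand-in for the real j_wcount/j_jlock checks in fs/reiserfs/journal.c):

    /* before: spin, immediately rescheduling, until the writers drain.
     * writers_active() stands in for the j_wcount/j_jlock test */
    while (writers_active(journal))
        yield();

    /* after: sleep for one tick per pass, giving other writers time to
     * join the running transaction before the commit is forced for
     * fsync/O_SYNC/O_DIRECT */
    while (writers_active(journal)) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
    }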

Also, when one process forces a transaction to end and plans on doing the
commit (like fsync), it sets a flag on the transaction so the journal code
knows not to bother kicking the journal work queue.

queue_delayed_work is used so that if a bunch of tiny transactions end in quick
succession, we aren't constantly kicking the work queue.
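
Taken together, the pattern is: the process that already intends to do the commit (fsync) marks the journal list, and do_journal_end only schedules the background commit when nobody has claimed it, with a small delay so several quickly-ending transactions share one wakeup. A rough sketch of that logic (hedged; journal->j_work is shorthand for the real SB_JOURNAL(p_s_sb)->j_work field):

    /* fsync/O_SYNC path: we will commit this list ourselves, so tell
     * the journal code there is no need to kick the work queue */
    jl->j_state |= LIST_COMMIT_PENDING;

    /* do_journal_end: only wake the async commit worker when no one
     * has claimed the commit; the HZ/10 delay lets a burst of tiny
     * transactions be handled by a single queued work item */
    if (!(jl->j_state & LIST_COMMIT_PENDING))
        queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);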

These changes significantly improve reiserfs performance under fsync-heavy
workloads.
parent c4373a9a
@@ -86,6 +86,7 @@ static struct workqueue_struct *commit_wq;
/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY 2
#define LIST_COMMIT_PENDING 4 /* someone will commit this list */
/* flags for do_journal_end */
#define FLUSH_ALL 1 /* flush commit and real blocks */
@@ -2477,7 +2478,9 @@ static void let_transaction_grow(struct super_block *sb,
{
unsigned long bcount = SB_JOURNAL(sb)->j_bcount;
while(1) {
yield();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
SB_JOURNAL(sb)->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&SB_JOURNAL(sb)->j_wcount) > 0 ||
atomic_read(&SB_JOURNAL(sb)->j_jlock)) &&
SB_JOURNAL(sb)->j_trans_id == trans_id) {
@@ -2910,9 +2913,15 @@ static void flush_async_commits(void *p) {
flush_commit_list(p_s_sb, jl, 1);
}
unlock_kernel();
/*
* this is a little racey, but there's no harm in missing
* the filemap_fdata_write
*/
if (!atomic_read(&SB_JOURNAL(p_s_sb)->j_async_throttle)) {
atomic_inc(&SB_JOURNAL(p_s_sb)->j_async_throttle);
filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
atomic_dec(&SB_JOURNAL(p_s_sb)->j_async_throttle);
}
}
/*
@@ -3001,7 +3010,8 @@ static int check_journal_end(struct reiserfs_transaction_handle *th, struct supe
jl = SB_JOURNAL(p_s_sb)->j_current_jl;
trans_id = jl->j_trans_id;
if (wait_on_commit)
jl->j_state |= LIST_COMMIT_PENDING;
atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
if (flush) {
SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
@@ -3523,8 +3533,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
if (flush) {
flush_commit_list(p_s_sb, jl, 1) ;
flush_journal_list(p_s_sb, jl, 1) ;
} else
queue_work(commit_wq, &SB_JOURNAL(p_s_sb)->j_work);
} else if (!(jl->j_state & LIST_COMMIT_PENDING))
queue_delayed_work(commit_wq, &SB_JOURNAL(p_s_sb)->j_work, HZ/10);
/* if the next transaction has any chance of wrapping, flush