Commit 34901f70 authored by Trond Myklebust

NFS: Writeback optimisation

Schedule writes using WB_SYNC_NONE first, then come back for a second pass
using WB_SYNC_ALL.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent ed90ef51
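As an aside (not part of the commit), the same idea, starting asynchronous writeback first and only waiting for completion in a second pass, has a rough user-space analogue in sync_file_range(2) followed by fsync(2). The sketch below is only an illustration of that pattern under this analogy; the helper flush_two_pass and the file name example.dat are made up, and none of it touches the NFS code changed in the diff.

/* Hypothetical user-space analogue of the two-pass writeback in this patch:
 * pass 1 starts asynchronous writeback of dirty pages (roughly WB_SYNC_NONE),
 * pass 2 waits for the data to reach stable storage (roughly WB_SYNC_ALL).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int flush_two_pass(int fd)
{
	/* Pass 1: queue writeback for the whole file, do not wait. */
	if (sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE) < 0)
		return -1;
	/* Pass 2: wait until data and metadata are durable. */
	return fsync(fd);
}

int main(void)
{
	int fd = open("example.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644); /* made-up file name */

	if (fd < 0 || write(fd, "hello\n", 6) < 0 || flush_two_pass(fd) < 0) {
		perror("two-pass flush demo");
		return 1;
	}
	return close(fd) ? 1 : 0;
}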
@@ -1325,27 +1325,39 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 	return ret;
 }
 
+static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
+{
+	int ret;
+
+	ret = nfs_writepages(mapping, wbc);
+	if (ret < 0)
+		goto out;
+	ret = nfs_sync_mapping_wait(mapping, wbc, how);
+	if (ret < 0)
+		goto out;
+	return 0;
+out:
+	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+	return ret;
+}
+
+/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
 static int nfs_write_mapping(struct address_space *mapping, int how)
 {
 	struct writeback_control wbc = {
 		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
+		.sync_mode = WB_SYNC_NONE,
 		.nr_to_write = LONG_MAX,
 		.for_writepages = 1,
 		.range_cyclic = 1,
 	};
 	int ret;
 
-	ret = nfs_writepages(mapping, &wbc);
-	if (ret < 0)
-		goto out;
-	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
+	ret = __nfs_write_mapping(mapping, &wbc, how);
 	if (ret < 0)
-		goto out;
-	return 0;
-out:
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return ret;
+		return ret;
+	wbc.sync_mode = WB_SYNC_ALL;
+	return __nfs_write_mapping(mapping, &wbc, how);
 }
 
 /*
...