Commit 7f2f12d9 authored by Trond Myklebust's avatar Trond Myklebust

NFS: Simplify nfs_wb_page()

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent acdc53b2
...@@ -502,44 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req) ...@@ -502,44 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
} }
#endif #endif
/*
 * Wait for every in-flight (locked) nfs_page request in the index range
 * [idx_start, idx_start + npages - 1] to complete.  npages == 0 means
 * "to the end of the file".
 *
 * Interruptible by fatal signals only.
 *
 * Caller must hold inode->i_lock; the lock is dropped and reacquired
 * around each sleep, and is held again on return.
 *
 * Returns the number of requests waited on, or a negative errno if
 * nfs_wait_on_request() was interrupted by a fatal signal.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *req;
pgoff_t idx_end, next;
unsigned int res = 0;
int error;
/* npages == 0: wait on everything up to the largest possible index */
if (npages == 0)
idx_end = ~0;
else
idx_end = idx_start + npages - 1;
next = idx_start;
/* Walk requests tagged NFS_PAGE_TAG_LOCKED, one at a time, in index order */
while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
if (req->wb_index > idx_end)
break;
next = req->wb_index + 1;
BUG_ON(!NFS_WBACK_BUSY(req));
/* Pin the request so it cannot be freed while we sleep unlocked */
kref_get(&req->wb_kref);
/* Must not sleep holding i_lock; drop it across the wait */
spin_unlock(&inode->i_lock);
error = nfs_wait_on_request(req);
nfs_release_request(req);
spin_lock(&inode->i_lock);
if (error < 0)
return error;
res++;
}
return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int static int
nfs_need_commit(struct nfs_inode *nfsi) nfs_need_commit(struct nfs_inode *nfsi)
...@@ -1432,7 +1394,7 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr ...@@ -1432,7 +1394,7 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
return ret; return ret;
} }
#else #else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) static int nfs_commit_inode(struct inode *inode, int how)
{ {
return 0; return 0;
} }
...@@ -1448,46 +1410,6 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) ...@@ -1448,46 +1410,6 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return nfs_commit_unstable_pages(inode, wbc); return nfs_commit_unstable_pages(inode, wbc);
} }
/*
 * nfs_sync_mapping_wait - wait for outstanding writes and commit unstable data
 * @mapping: address space of the inode being flushed
 * @wbc:     writeback control; range_cyclic/range_start/range_end select the
 *           page-index range to operate on
 * @how:     commit flags passed through to nfs_commit_list()
 *
 * Loops until there is nothing left to wait on or commit in the range:
 * first waits for locked requests, then scans for pages needing a commit
 * and commits them (the commit itself runs without i_lock held).
 *
 * Returns the last result from nfs_commit_list() /
 * nfs_wait_on_requests_locked(): >= 0 on success, negative errno on error.
 */
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
struct inode *inode = mapping->host;
pgoff_t idx_start, idx_end;
unsigned int npages = 0;
LIST_HEAD(head);
long pages, ret;
/* FIXME */
if (wbc->range_cyclic)
idx_start = 0;
else {
idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (idx_end > idx_start) {
pgoff_t l_npages = 1 + idx_end - idx_start;
npages = l_npages;
/* Guard against truncation if pgoff_t is wider than unsigned int;
 * npages == 0 then means "flush to end of file". */
if (sizeof(npages) != sizeof(l_npages) &&
(pgoff_t)npages != l_npages)
npages = 0;
}
}
spin_lock(&inode->i_lock);
do {
/* If we had to wait on anything, rescan before committing */
ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
if (ret != 0)
continue;
pages = nfs_scan_commit(inode, &head, idx_start, npages);
if (pages == 0)
break;
/* Also sweep up any commit-pending requests outside the range */
pages += nfs_scan_commit(inode, &head, 0, 0);
/* nfs_commit_list() sleeps; must drop i_lock around it */
spin_unlock(&inode->i_lock);
ret = nfs_commit_list(inode, &head, how);
spin_lock(&inode->i_lock);
} while (ret >= 0);
spin_unlock(&inode->i_lock);
return ret;
}
/* /*
* flush the inode to disk. * flush the inode to disk.
*/ */
...@@ -1531,45 +1453,49 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) ...@@ -1531,45 +1453,49 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
return ret; return ret;
} }
static int nfs_wb_page_priority(struct inode *inode, struct page *page, /*
int how) * Write back all requests on one page - we do this before reading it.
*/
int nfs_wb_page(struct inode *inode, struct page *page)
{ {
loff_t range_start = page_offset(page); loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = { struct writeback_control wbc = {
.bdi = page->mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL, .sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX, .nr_to_write = 0,
.range_start = range_start, .range_start = range_start,
.range_end = range_end, .range_end = range_end,
}; };
struct nfs_page *req;
int need_commit;
int ret; int ret;
do { while(PagePrivate(page)) {
if (clear_page_dirty_for_io(page)) { if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc); ret = nfs_writepage_locked(page, &wbc);
if (ret < 0) if (ret < 0)
goto out_error; goto out_error;
} else if (!PagePrivate(page)) }
req = nfs_find_and_lock_request(page);
if (!req)
break; break;
ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out_error;
}
need_commit = test_bit(PG_CLEAN, &req->wb_flags);
nfs_clear_page_tag_locked(req);
if (need_commit) {
ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0) if (ret < 0)
goto out_error; goto out_error;
} while (PagePrivate(page)); }
}
return 0; return 0;
out_error: out_error:
__mark_inode_dirty(inode, I_DIRTY_PAGES);
return ret; return ret;
} }
/*
* Write back all requests on one page - we do this before reading it.
*/
int nfs_wb_page(struct inode *inode, struct page* page)
{
return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage, int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page) struct page *page)
......
...@@ -475,7 +475,6 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); ...@@ -475,7 +475,6 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
* Try to write back everything synchronously (but check the * Try to write back everything synchronously (but check the
* return value!) * return value!)
*/ */
extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page); extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment