Commit c988950e authored by Trond Myklebust's avatar Trond Myklebust

NFS: Simplify nfs_wb_page_cancel()

In all cases we should be able to just remove the request and call
cancel_dirty_page().
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 2928db1f
...@@ -540,19 +540,6 @@ static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, u ...@@ -540,19 +540,6 @@ static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, u
return res; return res;
} }
/*
 * Drain a list of commit requests without sending a COMMIT: each request
 * is unlinked from @head, dropped from the commit accounting, removed
 * from its inode's request list, and finally unlocked.
 */
static void nfs_cancel_commit_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		nfs_clear_request_commit(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int static int
nfs_need_commit(struct nfs_inode *nfsi) nfs_need_commit(struct nfs_inode *nfsi)
...@@ -1495,13 +1482,6 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr ...@@ -1495,13 +1482,6 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
pages = nfs_scan_commit(inode, &head, idx_start, npages); pages = nfs_scan_commit(inode, &head, idx_start, npages);
if (pages == 0) if (pages == 0)
break; break;
if (how & FLUSH_INVALIDATE) {
spin_unlock(&inode->i_lock);
nfs_cancel_commit_list(&head);
ret = pages;
spin_lock(&inode->i_lock);
continue;
}
pages += nfs_scan_commit(inode, &head, 0, 0); pages += nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
ret = nfs_commit_list(inode, &head, how); ret = nfs_commit_list(inode, &head, how);
...@@ -1558,26 +1538,13 @@ int nfs_wb_nocommit(struct inode *inode) ...@@ -1558,26 +1538,13 @@ int nfs_wb_nocommit(struct inode *inode)
int nfs_wb_page_cancel(struct inode *inode, struct page *page) int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{ {
struct nfs_page *req; struct nfs_page *req;
loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.bdi = page->mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_start = range_start,
.range_end = range_end,
};
int ret = 0; int ret = 0;
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
for (;;) { for (;;) {
req = nfs_page_find_request(page); req = nfs_page_find_request(page);
if (req == NULL) if (req == NULL)
goto out;
if (test_bit(PG_CLEAN, &req->wb_flags)) {
nfs_release_request(req);
break; break;
}
if (nfs_lock_request_dontget(req)) { if (nfs_lock_request_dontget(req)) {
nfs_inode_remove_request(req); nfs_inode_remove_request(req);
/* /*
...@@ -1591,12 +1558,8 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) ...@@ -1591,12 +1558,8 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
ret = nfs_wait_on_request(req); ret = nfs_wait_on_request(req);
nfs_release_request(req); nfs_release_request(req);
if (ret < 0) if (ret < 0)
goto out; break;
} }
if (!PagePrivate(page))
return 0;
ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
return ret; return ret;
} }
......
...@@ -34,8 +34,6 @@ ...@@ -34,8 +34,6 @@
#define FLUSH_LOWPRI 8 /* low priority background flush */ #define FLUSH_LOWPRI 8 /* low priority background flush */
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */ #define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
#define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */ #define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */
#define FLUSH_INVALIDATE 64 /* Invalidate the page cache */
#define FLUSH_NOWRITEPAGE 128 /* Don't call writepage() */
#ifdef __KERNEL__ #ifdef __KERNEL__
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment