Commit 7ad84aa9 authored by Trond Myklebust

NFS: Clean up - simplify nfs_lock_request()

We only have two places where we need to grab a reference when trying
to lock the nfs_page. We're better off making that explicit.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Fred Isaman <iisaman@netapp.com>
parent d1182b33
...@@ -657,6 +657,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d ...@@ -657,6 +657,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
break; break;
} }
nfs_lock_request(req); nfs_lock_request(req);
kref_get(&req->wb_kref);
req->wb_index = pos >> PAGE_SHIFT; req->wb_index = pos >> PAGE_SHIFT;
req->wb_offset = pos & ~PAGE_MASK; req->wb_offset = pos & ~PAGE_MASK;
if (!nfs_pageio_add_request(desc, req)) { if (!nfs_pageio_add_request(desc, req)) {
......
...@@ -260,10 +260,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo ...@@ -260,10 +260,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
req = nfs_page_find_request_locked(page); req = nfs_page_find_request_locked(page);
if (req == NULL) if (req == NULL)
break; break;
if (nfs_lock_request_dontget(req)) if (nfs_lock_request(req))
break; break;
/* Note: If we hold the page lock, as is the case in nfs_writepage, /* Note: If we hold the page lock, as is the case in nfs_writepage,
* then the call to nfs_lock_request_dontget() will always * then the call to nfs_lock_request() will always
* succeed provided that someone hasn't already marked the * succeed provided that someone hasn't already marked the
* request as dirty (in which case we don't care). * request as dirty (in which case we don't care).
*/ */
...@@ -406,7 +406,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) ...@@ -406,7 +406,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
struct nfs_inode *nfsi = NFS_I(inode); struct nfs_inode *nfsi = NFS_I(inode);
/* Lock the request! */ /* Lock the request! */
nfs_lock_request_dontget(req); nfs_lock_request(req);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE)) if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
...@@ -651,6 +651,7 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, ...@@ -651,6 +651,7 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
list_for_each_entry_safe(req, tmp, src, wb_list) { list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req)) if (!nfs_lock_request(req))
continue; continue;
kref_get(&req->wb_kref);
if (cond_resched_lock(cinfo->lock)) if (cond_resched_lock(cinfo->lock))
list_safe_reset_next(req, tmp, wb_list); list_safe_reset_next(req, tmp, wb_list);
nfs_request_remove_commit_list(req, cinfo); nfs_request_remove_commit_list(req, cinfo);
...@@ -741,7 +742,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, ...@@ -741,7 +742,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
|| end < req->wb_offset) || end < req->wb_offset)
goto out_flushme; goto out_flushme;
if (nfs_lock_request_dontget(req)) if (nfs_lock_request(req))
break; break;
/* The request is locked, so wait and then retry */ /* The request is locked, so wait and then retry */
...@@ -1717,7 +1718,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) ...@@ -1717,7 +1718,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
req = nfs_page_find_request(page); req = nfs_page_find_request(page);
if (req == NULL) if (req == NULL)
break; break;
if (nfs_lock_request_dontget(req)) { if (nfs_lock_request(req)) {
nfs_clear_request_commit(req); nfs_clear_request_commit(req);
nfs_inode_remove_request(req); nfs_inode_remove_request(req);
/* /*
......
...@@ -99,24 +99,14 @@ extern void nfs_unlock_request(struct nfs_page *req); ...@@ -99,24 +99,14 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_request_dont_release(struct nfs_page *req); extern void nfs_unlock_request_dont_release(struct nfs_page *req);
/* /*
* Lock the page of an asynchronous request without getting a new reference * Lock the page of an asynchronous request
*/ */
static inline int
nfs_lock_request_dontget(struct nfs_page *req)
{
return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
static inline int static inline int
nfs_lock_request(struct nfs_page *req) nfs_lock_request(struct nfs_page *req)
{ {
if (test_and_set_bit(PG_BUSY, &req->wb_flags)) return !test_and_set_bit(PG_BUSY, &req->wb_flags);
return 0;
kref_get(&req->wb_kref);
return 1;
} }
/** /**
* nfs_list_add_request - Insert a request into a list * nfs_list_add_request - Insert a request into a list
* @req: request * @req: request
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment