Commit 74a6d4b5 authored by Trond Myklebust

NFS: Further optimise nfs_lock_and_join_requests()

When locking the entire group in order to remove subrequests,
the locks are always taken in order, and with the page group
lock being taken after the page head is locked. The intention
is that:

1) The lock on the group head guarantees that requests may not
   be removed from the group (although new entries could be appended
   if we're not holding the group lock).
2) It is safe to drop and retake the page group lock while iterating
   through the list, in particular when waiting for a subrequest lock.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent b5bab9bf
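
For readers skimming the change, the new retry loop in nfs_lock_and_join_requests() (second hunk below) boils down to the following annotated sketch. It is a simplified, non-compilable excerpt: it reuses the helper names exactly as they appear in the diff, omits the surrounding byte-range checks and the earlier part of the function that takes the head and group locks, and the added comments restate the locking intent described in the commit message above.

/* Annotated sketch of the new locking loop (condensed from the diff below).
 * On entry the group head is locked and the page group lock is held.
 */
for (subreq = head->wb_this_page; subreq != head;
		subreq = subreq->wb_this_page) {

	while (!nfs_lock_request(subreq)) {
		/* Drop only the page group lock before sleeping: the lock on
		 * the group head still prevents requests from being removed
		 * from the group, per point 1) above. */
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			/* Retake the group lock (point 2) and retry the
			 * nfs_lock_request() in the while condition. */
			ret = nfs_page_group_lock(head, false);
		if (ret < 0) {
			/* The group lock is not held here; drop the
			 * subrequest locks taken so far and release the
			 * group head. */
			nfs_unroll_locks(inode, head, subreq);
			nfs_unlock_and_release_request(head);
			return ERR_PTR(ret);
		}
	}

	/* ... contiguity checks on subreq (see the -EIO hunk below) ... */
}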
@@ -377,31 +377,17 @@ nfs_page_group_clear_bits(struct nfs_page *req)
  *
  * returns 0 on success, < 0 on error.
  */
-static int
-nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
-		struct nfs_page *req)
+static void
+nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
+		struct nfs_page *req)
 {
 	struct nfs_page *tmp;
-	int ret;
 
 	/* relinquish all the locks successfully grabbed this run */
 	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page)
 		nfs_unlock_request(tmp);
 
 	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
-
-	/* grab a ref on the request that will be waited on */
-	kref_get(&req->wb_kref);
-
-	nfs_page_group_unlock(head);
-
-	/* release ref from nfs_page_find_head_request_locked */
-	nfs_unlock_and_release_request(head);
-
-	ret = nfs_wait_on_request(req);
-	nfs_release_request(req);
-
-	return ret;
 }
 
 /*
@@ -525,19 +511,22 @@ nfs_lock_and_join_requests(struct page *page)
 	total_bytes = head->wb_bytes;
 	for (subreq = head->wb_this_page; subreq != head;
 			subreq = subreq->wb_this_page) {
-		if (!nfs_lock_request(subreq)) {
+
+		while (!nfs_lock_request(subreq)) {
 			/*
-			 * releases page group bit lock and
-			 * page locks and all references
+			 * Unlock page to allow nfs_page_group_sync_on_bit()
+			 * to succeed
 			 */
-			ret = nfs_unroll_locks_and_wait(inode, head,
-					subreq);
-
-			if (ret == 0)
-				goto try_again;
-
-			return ERR_PTR(ret);
+			nfs_page_group_unlock(head);
+			ret = nfs_wait_on_request(subreq);
+			if (!ret)
+				ret = nfs_page_group_lock(head, false);
+			if (ret < 0) {
+				nfs_unroll_locks(inode, head, subreq);
+				nfs_unlock_and_release_request(head);
+				return ERR_PTR(ret);
+			}
 		}
 		/*
 		 * Subrequests are always contiguous, non overlapping
 		 * and in order - but may be repeated (mirrored writes).
@@ -549,7 +538,9 @@ nfs_lock_and_join_requests(struct page *page)
 		    ((subreq->wb_offset + subreq->wb_bytes) >
 		     (head->wb_offset + total_bytes)))) {
 			nfs_unlock_request(subreq);
-			nfs_unroll_locks_and_wait(inode, head, subreq);
+			nfs_unroll_locks(inode, head, subreq);
+			nfs_page_group_unlock(head);
+			nfs_unlock_and_release_request(head);
 			return ERR_PTR(-EIO);
 		}
 	}
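
The -EIO path in this last hunk unwinds differently from the wait-failure path in the previous hunk: here the page group lock is still held when the contiguity check fails, so the unwind has to drop it explicitly before releasing the group head. A condensed, annotated excerpt of that path, with the same caveats as the sketch above:

/* Contiguity check failed while the page group lock is still held. */
nfs_unlock_request(subreq);            /* drop the subrequest lock taken this iteration */
nfs_unroll_locks(inode, head, subreq); /* drop the earlier subrequest locks */
nfs_page_group_unlock(head);           /* group lock is held here, unlike the
                                        * wait-failure path, so drop it */
nfs_unlock_and_release_request(head);  /* release the group head */
return ERR_PTR(-EIO);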