Commit a0e265bc authored by Trond Myklebust's avatar Trond Myklebust

NFS: Fix an ABBA issue in nfs_lock_and_join_requests()

All other callers of nfs_page_group_lock() appear to already hold the
page lock on the head page, so doing it in the opposite order here
is inefficient, although not deadlock prone since we roll back all
locks on contention.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 7cb9cd9a
...@@ -383,7 +383,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, ...@@ -383,7 +383,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
int ret; int ret;
/* relinquish all the locks successfully grabbed this run */ /* relinquish all the locks successfully grabbed this run */
for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page)
nfs_unlock_request(tmp); nfs_unlock_request(tmp);
WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
...@@ -395,7 +395,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, ...@@ -395,7 +395,7 @@ nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
/* release ref from nfs_page_find_head_request_locked */ /* release ref from nfs_page_find_head_request_locked */
nfs_release_request(head); nfs_unlock_and_release_request(head);
ret = nfs_wait_on_request(req); ret = nfs_wait_on_request(req);
nfs_release_request(req); nfs_release_request(req);
...@@ -484,10 +484,6 @@ nfs_lock_and_join_requests(struct page *page) ...@@ -484,10 +484,6 @@ nfs_lock_and_join_requests(struct page *page)
int ret; int ret;
try_again: try_again:
total_bytes = 0;
WARN_ON_ONCE(destroy_list);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
/* /*
...@@ -502,6 +498,16 @@ nfs_lock_and_join_requests(struct page *page) ...@@ -502,6 +498,16 @@ nfs_lock_and_join_requests(struct page *page)
return NULL; return NULL;
} }
/* lock the page head first in order to avoid an ABBA inefficiency */
if (!nfs_lock_request(head)) {
spin_unlock(&inode->i_lock);
ret = nfs_wait_on_request(head);
nfs_release_request(head);
if (ret < 0)
return ERR_PTR(ret);
goto try_again;
}
/* holding inode lock, so always make a non-blocking call to try the /* holding inode lock, so always make a non-blocking call to try the
* page group lock */ * page group lock */
ret = nfs_page_group_lock(head, true); ret = nfs_page_group_lock(head, true);
...@@ -509,13 +515,14 @@ nfs_lock_and_join_requests(struct page *page) ...@@ -509,13 +515,14 @@ nfs_lock_and_join_requests(struct page *page)
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
nfs_page_group_lock_wait(head); nfs_page_group_lock_wait(head);
nfs_release_request(head); nfs_unlock_and_release_request(head);
goto try_again; goto try_again;
} }
/* lock each request in the page group */ /* lock each request in the page group */
subreq = head; total_bytes = head->wb_bytes;
do { for (subreq = head->wb_this_page; subreq != head;
subreq = subreq->wb_this_page) {
/* /*
* Subrequests are always contiguous, non overlapping * Subrequests are always contiguous, non overlapping
* and in order - but may be repeated (mirrored writes). * and in order - but may be repeated (mirrored writes).
...@@ -541,9 +548,7 @@ nfs_lock_and_join_requests(struct page *page) ...@@ -541,9 +548,7 @@ nfs_lock_and_join_requests(struct page *page)
return ERR_PTR(ret); return ERR_PTR(ret);
} }
}
subreq = subreq->wb_this_page;
} while (subreq != head);
/* Now that all requests are locked, make sure they aren't on any list. /* Now that all requests are locked, make sure they aren't on any list.
* Commit list removal accounting is done after locks are dropped */ * Commit list removal accounting is done after locks are dropped */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment