Commit 65b62a29 authored by Trond Myklebust

NFSv4: Ensure delegation recall and byte range lock removal don't conflict

Add a mutex to the struct nfs4_state_owner to ensure that delegation
recall doesn't conflict with byte range lock removal.

Note that we nest the new mutex _outside_ the state manager reclaim
protection (nfsi->rwsem) in order to avoid deadlocks.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 37380e42
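
The nesting rule described above (take the new so_delegreturn_mutex first, then nfsi->rwsem inside it) is the heart of the patch. Below is a minimal, self-contained userspace sketch of that lock ordering, not kernel code: the struct and function names (owner, unlock_path, recall_path) are illustrative stand-ins, and pthread primitives substitute for the kernel mutex and nfsi->rwsem.

/*
 * Illustrative sketch of the nesting rule: the mutex standing in for
 * so_delegreturn_mutex is always taken *outside* the rwsem standing in
 * for nfsi->rwsem, so the byte range unlock path and the delegation
 * recall path agree on lock order and cannot deadlock.
 */
#include <pthread.h>
#include <stdio.h>

struct owner {
        pthread_mutex_t delegreturn_mutex;      /* stands in for so_delegreturn_mutex */
        pthread_rwlock_t reclaim_rwsem;         /* stands in for nfsi->rwsem */
};

/* Byte range lock removal path, cf. nfs4_proc_unlck() in the patch. */
static void unlock_path(struct owner *sp)
{
        pthread_mutex_lock(&sp->delegreturn_mutex);     /* 1: exclude delegation recall */
        pthread_rwlock_rdlock(&sp->reclaim_rwsem);      /* 2: exclude reclaim, nested inside */
        printf("release the VFS lock, then send LOCKU\n");
        pthread_rwlock_unlock(&sp->reclaim_rwsem);
        pthread_mutex_unlock(&sp->delegreturn_mutex);
}

/* Delegation recall path, cf. nfs_delegation_claim_opens() in the patch. */
static void recall_path(struct owner *sp)
{
        pthread_mutex_lock(&sp->delegreturn_mutex);     /* blocks unlock_path() */
        printf("reclaim open state and byte range locks\n");
        pthread_mutex_unlock(&sp->delegreturn_mutex);
}

int main(void)
{
        struct owner sp;

        pthread_mutex_init(&sp.delegreturn_mutex, NULL);
        pthread_rwlock_init(&sp.reclaim_rwsem, NULL);
        recall_path(&sp);
        unlock_path(&sp);
        return 0;
}
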
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -71,8 +71,10 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 	int status = 0;
 
 	if (inode->i_flock == NULL)
-		goto out;
+		return 0;
 
+	if (inode->i_flock == NULL)
+		goto out;
 	/* Protect inode->i_flock using the file locks lock */
 	lock_flocks();
 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
@@ -113,12 +115,15 @@ static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *s
 		get_nfs_open_context(ctx);
 		spin_unlock(&inode->i_lock);
 		sp = state->owner;
+		/* Block nfs4_proc_unlck */
+		mutex_lock(&sp->so_delegreturn_mutex);
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
 		err = nfs4_open_delegation_recall(ctx, state, stateid);
 		if (!err)
 			err = nfs_delegation_claim_locks(ctx, state);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 			err = -EAGAIN;
+		mutex_unlock(&sp->so_delegreturn_mutex);
 		put_nfs_open_context(ctx);
 		if (err != 0)
 			return err;
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -93,6 +93,7 @@ struct nfs4_state_owner {
 	struct list_head so_states;
 	struct nfs_seqid_counter so_seqid;
 	seqcount_t so_reclaim_seqcount;
+	struct mutex so_delegreturn_mutex;
 };
 
 enum {
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4485,7 +4485,9 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
-	struct nfs_inode *nfsi = NFS_I(state->inode);
+	struct inode *inode = state->inode;
+	struct nfs4_state_owner *sp = state->owner;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_seqid *seqid;
 	struct nfs4_lock_state *lsp;
 	struct rpc_task *task;
@@ -4495,12 +4497,17 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	status = nfs4_set_lock_state(state, request);
 	/* Unlock _before_ we do the RPC call */
 	request->fl_flags |= FL_EXISTS;
+	/* Exclude nfs_delegation_claim_locks() */
+	mutex_lock(&sp->so_delegreturn_mutex);
+	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
 	down_read(&nfsi->rwsem);
 	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
 		up_read(&nfsi->rwsem);
+		mutex_unlock(&sp->so_delegreturn_mutex);
 		goto out;
 	}
 	up_read(&nfsi->rwsem);
+	mutex_unlock(&sp->so_delegreturn_mutex);
 	if (status != 0)
 		goto out;
 	/* Is this a delegated lock? */
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -519,6 +519,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
 	atomic_set(&sp->so_count, 1);
 	INIT_LIST_HEAD(&sp->so_lru);
 	seqcount_init(&sp->so_reclaim_seqcount);
+	mutex_init(&sp->so_delegreturn_mutex);
 	return sp;
 }