Commit 89ff7783 authored by Linus Torvalds

Merge tag 'nfs-for-3.10-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client bugfixes from Trond Myklebust:

 - Stable fix to prevent an rpc_task wakeup race
 - Fix a NFSv4.1 session drain deadlock
 - Fix a NFSv4/v4.1 mount regression when not running rpc.gssd
 - Ensure auth_gss pipe detection works in namespaces
 - Fix SETCLIENTID fallback if rpcsec_gss is not available

* tag 'nfs-for-3.10-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFS: Fix SETCLIENTID fallback if GSS is not available
  SUNRPC: Prevent an rpc_task wakeup race
  NFSv4.1 Fix a pNFS session draining deadlock
  SUNRPC: Convert auth_gss pipe detection to work in namespaces
  SUNRPC: Faster detection if gssd is actually running
  SUNRPC: Fix a bug in gss_create_upcall
parents 932ff06b 83c168bf
@@ -414,7 +414,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
         spin_lock(&tbl->slot_tbl_lock);
         /* state manager is resetting the session */
-        if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+        if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                 spin_unlock(&tbl->slot_tbl_lock);
                 status = htonl(NFS4ERR_DELAY);
                 /* Return NFS4ERR_BADSESSION if we're draining the session
@@ -763,7 +763,7 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
          * A single slot, so highest used slotid is either 0 or -1
          */
         tbl->highest_used_slotid = NFS4_NO_SLOT;
-        nfs4_session_drain_complete(session, tbl);
+        nfs4_slot_tbl_drain_complete(tbl);
         spin_unlock(&tbl->slot_tbl_lock);
 }
@@ -203,7 +203,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
                 __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
         error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
         if (error == -EINVAL)
-                error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL);
+                error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
         if (error < 0)
                 goto error;
@@ -572,7 +572,7 @@ int nfs41_setup_sequence(struct nfs4_session *session,
         task->tk_timeout = 0;
         spin_lock(&tbl->slot_tbl_lock);
-        if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
+        if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
             !args->sa_privileged) {
                 /* The state manager will wait until the slot table is empty */
                 dprintk("%s session is draining\n", __func__);
@@ -73,7 +73,7 @@ void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
                         tbl->highest_used_slotid = new_max;
                 else {
                         tbl->highest_used_slotid = NFS4_NO_SLOT;
-                        nfs4_session_drain_complete(tbl->session, tbl);
+                        nfs4_slot_tbl_drain_complete(tbl);
                 }
         }
         dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
@@ -226,7 +226,7 @@ static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
         struct nfs4_slot *slot = pslot;
         struct nfs4_slot_table *tbl = slot->table;
-        if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+        if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
                 return false;
         slot->generation = tbl->generation;
         args->sa_slot = slot;
@@ -25,6 +25,10 @@ struct nfs4_slot {
 };
 /* Sessions */
+enum nfs4_slot_tbl_state {
+        NFS4_SLOT_TBL_DRAINING,
+};
+
 #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
 struct nfs4_slot_table {
         struct nfs4_session *session;           /* Parent session */
@@ -43,6 +47,7 @@ struct nfs4_slot_table {
         unsigned long   generation;             /* Generation counter for
                                                    target_highest_slotid */
         struct completion complete;
+        unsigned long   slot_tbl_state;
 };
@@ -68,7 +73,6 @@ struct nfs4_session {
 enum nfs4_session_state {
         NFS4_SESSION_INITING,
-        NFS4_SESSION_DRAINING,
 };
 #if defined(CONFIG_NFS_V4_1)
@@ -88,12 +92,11 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
 extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
-extern void nfs4_session_drain_complete(struct nfs4_session *session,
-                struct nfs4_slot_table *tbl);
+extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
-static inline bool nfs4_session_draining(struct nfs4_session *session)
+static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)
 {
-        return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
+        return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 }
 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
@@ -241,7 +241,7 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
         if (ses == NULL)
                 return;
         tbl = &ses->fc_slot_table;
-        if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+        if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
                 spin_lock(&tbl->slot_tbl_lock);
                 nfs41_wake_slot_table(tbl);
                 spin_unlock(&tbl->slot_tbl_lock);
@@ -251,15 +251,15 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
 /*
  * Signal state manager thread if session fore channel is drained
  */
-void nfs4_session_drain_complete(struct nfs4_session *session,
-                struct nfs4_slot_table *tbl)
+void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
 {
-        if (nfs4_session_draining(session))
+        if (nfs4_slot_tbl_draining(tbl))
                 complete(&tbl->complete);
 }
-static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
+static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
 {
+        set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
         spin_lock(&tbl->slot_tbl_lock);
         if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
                 INIT_COMPLETION(tbl->complete);
@@ -275,13 +275,12 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
         struct nfs4_session *ses = clp->cl_session;
         int ret = 0;
-        set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
         /* back channel */
-        ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+        ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
         if (ret)
                 return ret;
         /* fore channel */
-        return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+        return nfs4_drain_slot_tbl(&ses->fc_slot_table);
 }
 static void nfs41_finish_session_reset(struct nfs_client *clp)
@@ -52,6 +52,8 @@
 #include <linux/sunrpc/gss_api.h>
 #include <asm/uaccess.h>
+#include "../netns.h"
+
 static const struct rpc_authops authgss_ops;
 static const struct rpc_credops gss_credops;
@@ -85,8 +87,6 @@ struct gss_auth {
 };
 /* pipe_version >= 0 if and only if someone has a pipe open. */
-static int pipe_version = -1;
-static atomic_t pipe_users = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(pipe_version_lock);
 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
@@ -266,24 +266,27 @@ struct gss_upcall_msg {
         char databuf[UPCALL_BUF_LEN];
 };
-static int get_pipe_version(void)
+static int get_pipe_version(struct net *net)
 {
+        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
         int ret;
         spin_lock(&pipe_version_lock);
-        if (pipe_version >= 0) {
-                atomic_inc(&pipe_users);
-                ret = pipe_version;
+        if (sn->pipe_version >= 0) {
+                atomic_inc(&sn->pipe_users);
+                ret = sn->pipe_version;
         } else
                 ret = -EAGAIN;
         spin_unlock(&pipe_version_lock);
         return ret;
 }
-static void put_pipe_version(void)
+static void put_pipe_version(struct net *net)
 {
-        if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) {
-                pipe_version = -1;
+        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+        if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
+                sn->pipe_version = -1;
                 spin_unlock(&pipe_version_lock);
         }
 }
@@ -291,9 +294,10 @@ static void put_pipe_version(void)
 static void
 gss_release_msg(struct gss_upcall_msg *gss_msg)
 {
+        struct net *net = rpc_net_ns(gss_msg->auth->client);
         if (!atomic_dec_and_test(&gss_msg->count))
                 return;
-        put_pipe_version();
+        put_pipe_version(net);
         BUG_ON(!list_empty(&gss_msg->list));
         if (gss_msg->ctx != NULL)
                 gss_put_ctx(gss_msg->ctx);
@@ -439,7 +443,10 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
                 struct rpc_clnt *clnt,
                 const char *service_name)
 {
-        if (pipe_version == 0)
+        struct net *net = rpc_net_ns(clnt);
+        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+        if (sn->pipe_version == 0)
                 gss_encode_v0_msg(gss_msg);
         else /* pipe_version == 1 */
                 gss_encode_v1_msg(gss_msg, clnt, service_name);
@@ -455,7 +462,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
         gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
         if (gss_msg == NULL)
                 return ERR_PTR(-ENOMEM);
-        vers = get_pipe_version();
+        vers = get_pipe_version(rpc_net_ns(clnt));
         if (vers < 0) {
                 kfree(gss_msg);
                 return ERR_PTR(vers);
@@ -559,24 +566,34 @@ gss_refresh_upcall(struct rpc_task *task)
 static inline int
 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 {
+        struct net *net = rpc_net_ns(gss_auth->client);
+        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
         struct rpc_pipe *pipe;
         struct rpc_cred *cred = &gss_cred->gc_base;
         struct gss_upcall_msg *gss_msg;
+        unsigned long timeout;
         DEFINE_WAIT(wait);
-        int err = 0;
+        int err;
         dprintk("RPC: %s for uid %u\n",
                 __func__, from_kuid(&init_user_ns, cred->cr_uid));
 retry:
+        err = 0;
+        /* Default timeout is 15s unless we know that gssd is not running */
+        timeout = 15 * HZ;
+        if (!sn->gssd_running)
+                timeout = HZ >> 2;
         gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
         if (PTR_ERR(gss_msg) == -EAGAIN) {
                 err = wait_event_interruptible_timeout(pipe_version_waitqueue,
-                                pipe_version >= 0, 15*HZ);
-                if (pipe_version < 0) {
+                                sn->pipe_version >= 0, timeout);
+                if (sn->pipe_version < 0) {
+                        if (err == 0)
+                                sn->gssd_running = 0;
                         warn_gssd();
                         err = -EACCES;
                 }
-                if (err)
+                if (err < 0)
                         goto out;
                 goto retry;
         }
@@ -707,20 +724,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 static int gss_pipe_open(struct inode *inode, int new_version)
 {
+        struct net *net = inode->i_sb->s_fs_info;
+        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
         int ret = 0;
         spin_lock(&pipe_version_lock);
-        if (pipe_version < 0) {
+        if (sn->pipe_version < 0) {
                 /* First open of any gss pipe determines the version: */
-                pipe_version = new_version;
+                sn->pipe_version = new_version;
                 rpc_wake_up(&pipe_version_rpc_waitqueue);
                 wake_up(&pipe_version_waitqueue);
-        } else if (pipe_version != new_version) {
+        } else if (sn->pipe_version != new_version) {
                 /* Trying to open a pipe of a different version */
                 ret = -EBUSY;
                 goto out;
         }
-        atomic_inc(&pipe_users);
+        atomic_inc(&sn->pipe_users);
 out:
         spin_unlock(&pipe_version_lock);
         return ret;
@@ -740,6 +759,7 @@ static int gss_pipe_open_v1(struct inode *inode)
 static void
 gss_pipe_release(struct inode *inode)
 {
+        struct net *net = inode->i_sb->s_fs_info;
         struct rpc_pipe *pipe = RPC_I(inode)->pipe;
         struct gss_upcall_msg *gss_msg;
@@ -758,7 +778,7 @@ gss_pipe_release(struct inode *inode)
         }
         spin_unlock(&pipe->lock);
-        put_pipe_version();
+        put_pipe_version(net);
 }
 static void
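
The gss_create_upcall() hunk above corresponds to the "Faster detection if gssd is actually running" and "Fix a bug in gss_create_upcall" entries in the shortlog: the 15 second wait for the gssd pipe is only used while the daemon is still believed to be running, a timed-out wait clears the per-namespace gssd_running flag so later attempts probe for only a quarter of a second, a wait that ends because the pipe appeared loops back and retries, and only a genuinely negative result aborts (the if (err < 0) test). The following userspace sketch restates that return-value handling with invented names (gssd_state, wait_for_pipe, upcall_with_retry) and a stubbed-out wait; it is an illustration under those assumptions, not the SUNRPC implementation.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define HZ 100                  /* assumed tick rate, only so 15 * HZ reads the same */

struct gssd_state {
        bool daemon_running;    /* stands in for sn->gssd_running */
        int pipe_version;       /* stands in for sn->pipe_version; -1 == no pipe open */
};

/*
 * Stub for wait_event_interruptible_timeout(): the real helper returns a
 * negative value if interrupted, 0 on timeout, and a positive value if the
 * condition became true.  This stub always "times out".
 */
static int wait_for_pipe(struct gssd_state *st, unsigned long timeout)
{
        (void)st;
        (void)timeout;
        return 0;
}

static int upcall_with_retry(struct gssd_state *st)
{
        unsigned long timeout;
        int err;

retry:
        err = 0;
        /* Default timeout is 15s unless we know the daemon is not running. */
        timeout = 15 * HZ;
        if (!st->daemon_running)
                timeout = HZ >> 2;

        if (st->pipe_version < 0) {
                err = wait_for_pipe(st, timeout);
                if (st->pipe_version < 0) {
                        if (err == 0)           /* timed out: remember the daemon is absent */
                                st->daemon_running = false;
                        err = -EACCES;
                }
                if (err < 0)
                        return err;             /* interrupted, or still no pipe */
                goto retry;                     /* the pipe appeared: set up the upcall again */
        }
        return 0;                               /* pipe already open: the upcall can proceed */
}

int main(void)
{
        struct gssd_state st = { .daemon_running = true, .pipe_version = -1 };
        int err = upcall_with_retry(&st);

        printf("upcall: %d, daemon_running now %d\n", err, st.daemon_running);
        return 0;
}

Run as-is, the stubbed wait always times out, so the sketch reports -EACCES and flips daemon_running to false, which is the state the real code uses to shorten the next retry.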
@@ -28,7 +28,11 @@ struct sunrpc_net {
         wait_queue_head_t gssp_wq;
         struct rpc_clnt *gssp_clnt;
         int use_gss_proxy;
+        int pipe_version;
+        atomic_t pipe_users;
         struct proc_dir_entry *use_gssp_proc;
+
+        unsigned int gssd_running;
 };
 extern int sunrpc_net_id;
@@ -216,11 +216,14 @@ rpc_destroy_inode(struct inode *inode)
 static int
 rpc_pipe_open(struct inode *inode, struct file *filp)
 {
+        struct net *net = inode->i_sb->s_fs_info;
+        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
         struct rpc_pipe *pipe;
         int first_open;
         int res = -ENXIO;
         mutex_lock(&inode->i_mutex);
+        sn->gssd_running = 1;
         pipe = RPC_I(inode)->pipe;
         if (pipe == NULL)
                 goto out;
@@ -1069,6 +1072,8 @@ void rpc_pipefs_init_net(struct net *net)
         struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
         mutex_init(&sn->pipefs_sb_lock);
+        sn->gssd_running = 1;
+        sn->pipe_version = -1;
 }
 /*
@@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
  * Note: If the task is ASYNC, and is being made runnable after sitting on an
  * rpc_wait_queue, this must be called with the queue spinlock held to protect
  * the wait queue operation.
+ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
+ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
+ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
+ * the RPC_TASK_RUNNING flag.
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
+        bool need_wakeup = !rpc_test_and_set_running(task);
+
         rpc_clear_queued(task);
-        if (rpc_test_and_set_running(task))
+        if (!need_wakeup)
                 return;
         if (RPC_IS_ASYNC(task)) {
                 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
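
The sched.c hunk above closes the rpc_task wakeup race by deciding ownership of the wakeup (rpc_test_and_set_running()) before the task is marked dequeued (rpc_clear_queued()), so the lockless RPC_IS_QUEUED() loop in __rpc_execute() cannot run the task away before the waker has looked at the RUNNING flag. The sketch below mirrors only that ordering in plain C11 atomics; all names (task_flags, make_runnable, FLAG_QUEUED) are invented for illustration, and none of this is kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { FLAG_QUEUED = 1u << 0, FLAG_RUNNING = 1u << 1 };

struct task_flags {
        _Atomic unsigned int bits;
};

/* Like test_and_set_bit(): sets RUNNING and returns its previous state. */
static bool test_and_set_running(struct task_flags *t)
{
        return atomic_fetch_or(&t->bits, FLAG_RUNNING) & FLAG_RUNNING;
}

static void clear_queued(struct task_flags *t)
{
        atomic_fetch_and(&t->bits, ~FLAG_QUEUED);
}

/*
 * Mirrors the patched rpc_make_runnable(): decide whether this caller owns
 * the wakeup *before* clearing QUEUED, so nothing that watches the QUEUED
 * bit locklessly can get ahead of that decision.
 */
static void make_runnable(struct task_flags *t, void (*wake)(void *), void *arg)
{
        bool need_wakeup = !test_and_set_running(t);

        clear_queued(t);
        if (!need_wakeup)
                return;         /* somebody else already owns the task */
        wake(arg);              /* stands in for scheduling the task's work */
}

static void wake_fn(void *arg)
{
        puts((const char *)arg);
}

int main(void)
{
        struct task_flags t;

        atomic_init(&t.bits, FLAG_QUEUED);
        make_runnable(&t, wake_fn, "woken exactly once");
        make_runnable(&t, wake_fn, "never printed: RUNNING was already set");
        return 0;
}

Swapping the first two statements of make_runnable() back to the old order (clear QUEUED, then test-and-set RUNNING) reopens the window the new comment in the hunk describes.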