Commit 5f40d420 authored by Linus Torvalds

Merge branch 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6

* 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6:
  NFS: NFSROOT should default to "proto=udp"
  nfs4: remove duplicated #include
  NFSv4: nfs4_state_mark_reclaim_nograce() should be static
  NFSv4: Fix the setlk error handler
  NFSv4.1: Fix the handling of the SEQUENCE status bits
  NFSv4/4.1: Fix nfs4_schedule_state_recovery abuses
  NFSv4.1 reclaim complete must wait for completion
  NFSv4: remove duplicate clientid in struct nfs_client
  NFSv4.1: Retry CREATE_SESSION on NFS4ERR_DELAY
  sunrpc: Propagate errors from xs_bind() through xs_create_sock()
  (try3-resend) Fix nfs_compat_user_ino64 so it doesn't cause problems if bit 31 or 63 are set in fileid
  nfs: fix compilation warning
  nfs: add kmalloc return value check in decode_and_add_ds
  SUNRPC: Remove resource leak in svc_rdma_send_error()
  nfs: close NFSv4 COMMIT vs. CLOSE race
  SUNRPC: Close a race in __rpc_wait_for_completion_task()
parents 215fd2fa 53d47375
@@ -37,6 +37,7 @@
 #include <linux/inet.h>
 #include <linux/nfs_xdr.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word)
  */
 u64 nfs_compat_user_ino64(u64 fileid)
 {
-        int ino;
+#ifdef CONFIG_COMPAT
+        compat_ulong_t ino;
+#else
+        unsigned long ino;
+#endif
         if (enable_ino64)
                 return fileid;
...
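(Aside on the hunk above: the type of the local matters because, for a 32-bit compat task, the reported inode number has to fold the 64-bit fileid down to a 32-bit value rather than pass it through a signed int, which is what broke when bit 31 or 63 was set. The elided remainder of the function is not reproduced here; the sketch below only illustrates the folding idea, and the helper name is invented for the example.)

/* Illustrative sketch only -- not the elided body of the hunk above.
 * If the destination type is narrower than the 64-bit fileid, the high
 * half is XOR-folded in instead of being truncated or sign-extended. */
#include <linux/compat.h>
#include <linux/types.h>

static u64 fileid_fold_example(u64 fileid)
{
#ifdef CONFIG_COMPAT
        compat_ulong_t ino;
#else
        unsigned long ino;
#endif
        ino = fileid;
        if (sizeof(ino) < sizeof(fileid))
                ino ^= fileid >> (sizeof(fileid) - sizeof(ino)) * 8;
        return ino;
}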
@@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
 #if defined(CONFIG_NFS_V4_1)
 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+#else
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
@@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
-extern void nfs4_schedule_state_recovery(struct nfs_client *);
+extern void nfs4_schedule_lease_recovery(struct nfs_client *);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
-extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
-extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
+extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
...
@@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
                 goto out_err;
         }
         buf = kmalloc(rlen + 1, GFP_KERNEL);
+        if (!buf) {
+                dprintk("%s: Not enough memory\n", __func__);
+                goto out_err;
+        }
         buf[rlen] = '\0';
         memcpy(buf, r_addr, rlen);
...
@@ -51,7 +51,6 @@
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
 #include <linux/utsname.h>
-#include <linux/mm.h>
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -257,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
                 case -NFS4ERR_OPENMODE:
                         if (state == NULL)
                                 break;
-                        nfs4_state_mark_reclaim_nograce(clp, state);
-                        goto do_state_recovery;
+                        nfs4_schedule_stateid_recovery(server, state);
+                        goto wait_on_recovery;
                 case -NFS4ERR_STALE_STATEID:
                 case -NFS4ERR_STALE_CLIENTID:
                 case -NFS4ERR_EXPIRED:
-                        goto do_state_recovery;
+                        nfs4_schedule_lease_recovery(clp);
+                        goto wait_on_recovery;
 #if defined(CONFIG_NFS_V4_1)
                 case -NFS4ERR_BADSESSION:
                 case -NFS4ERR_BADSLOT:
@@ -273,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
                 case -NFS4ERR_SEQ_MISORDERED:
                         dprintk("%s ERROR: %d Reset session\n", __func__,
                                 errorcode);
-                        nfs4_schedule_state_recovery(clp);
+                        nfs4_schedule_session_recovery(clp->cl_session);
                         exception->retry = 1;
                         break;
 #endif /* defined(CONFIG_NFS_V4_1) */
@@ -296,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
         }
         /* We failed to handle the error */
         return nfs4_map_errors(ret);
-do_state_recovery:
-        nfs4_schedule_state_recovery(clp);
+wait_on_recovery:
         ret = nfs4_wait_clnt_recover(clp);
         if (ret == 0)
                 exception->retry = 1;
@@ -436,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *
                 clp = res->sr_session->clp;
                 do_renew_lease(clp, timestamp);
                 /* Check sequence flags */
-                if (atomic_read(&clp->cl_count) > 1)
-                        nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+                if (res->sr_status_flags != 0)
+                        nfs4_schedule_lease_recovery(clp);
                 break;
         case -NFS4ERR_DELAY:
                 /* The server detected a resend of the RPC call and
@@ -1256,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
                         case -NFS4ERR_BAD_HIGH_SLOT:
                         case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                         case -NFS4ERR_DEADSESSION:
-                                nfs4_schedule_state_recovery(
-                                        server->nfs_client);
+                                nfs4_schedule_session_recovery(server->nfs_client->cl_session);
                                 goto out;
                         case -NFS4ERR_STALE_CLIENTID:
                         case -NFS4ERR_STALE_STATEID:
                         case -NFS4ERR_EXPIRED:
                                 /* Don't recall a delegation if it was lost */
-                                nfs4_schedule_state_recovery(server->nfs_client);
+                                nfs4_schedule_lease_recovery(server->nfs_client);
                                 goto out;
                         case -ERESTARTSYS:
                                 /*
@@ -1272,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
                                  */
                         case -NFS4ERR_ADMIN_REVOKED:
                         case -NFS4ERR_BAD_STATEID:
-                                nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+                                nfs4_schedule_stateid_recovery(server, state);
                         case -EKEYEXPIRED:
                                 /*
                                  * User RPCSEC_GSS context has expired.
@@ -1588,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server)
                 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
                     !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
                         break;
-                nfs4_schedule_state_recovery(clp);
+                nfs4_schedule_state_manager(clp);
                 ret = -EIO;
         }
         return ret;
@@ -3179,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
         if (task->tk_status < 0) {
                 /* Unless we're shutting down, schedule state recovery! */
                 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
-                        nfs4_schedule_state_recovery(clp);
+                        nfs4_schedule_lease_recovery(clp);
                 return;
         }
         do_renew_lease(clp, timestamp);
@@ -3262,7 +3260,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen,
         spages = pages;
         do {
-                len = min(PAGE_CACHE_SIZE, buflen);
+                len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
                 newpage = alloc_page(GFP_KERNEL);
                 if (newpage == NULL)
@@ -3504,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                 case -NFS4ERR_OPENMODE:
                         if (state == NULL)
                                 break;
-                        nfs4_state_mark_reclaim_nograce(clp, state);
-                        goto do_state_recovery;
+                        nfs4_schedule_stateid_recovery(server, state);
+                        goto wait_on_recovery;
                 case -NFS4ERR_STALE_STATEID:
                 case -NFS4ERR_STALE_CLIENTID:
                 case -NFS4ERR_EXPIRED:
-                        goto do_state_recovery;
+                        nfs4_schedule_lease_recovery(clp);
+                        goto wait_on_recovery;
 #if defined(CONFIG_NFS_V4_1)
                 case -NFS4ERR_BADSESSION:
                 case -NFS4ERR_BADSLOT:
@@ -3520,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                 case -NFS4ERR_SEQ_MISORDERED:
                         dprintk("%s ERROR %d, Reset session\n", __func__,
                                 task->tk_status);
-                        nfs4_schedule_state_recovery(clp);
+                        nfs4_schedule_session_recovery(clp->cl_session);
                         task->tk_status = 0;
                         return -EAGAIN;
 #endif /* CONFIG_NFS_V4_1 */
@@ -3537,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
         }
         task->tk_status = nfs4_map_errors(task->tk_status);
         return 0;
-do_state_recovery:
+wait_on_recovery:
         rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
-        nfs4_schedule_state_recovery(clp);
         if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
                 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
         task->tk_status = 0;
@@ -4150,7 +4148,7 @@ static void nfs4_lock_release(void *calldata)
                 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
                                 data->arg.lock_seqid);
                 if (!IS_ERR(task))
-                        rpc_put_task(task);
+                        rpc_put_task_async(task);
                 dprintk("%s: cancelling lock!\n", __func__);
         } else
                 nfs_free_seqid(data->arg.lock_seqid);
@@ -4174,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
 {
-        struct nfs_client *clp = server->nfs_client;
-        struct nfs4_state *state = lsp->ls_state;
         switch (error) {
         case -NFS4ERR_ADMIN_REVOKED:
         case -NFS4ERR_BAD_STATEID:
-        case -NFS4ERR_EXPIRED:
+                lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
                 if (new_lock_owner != 0 ||
                     (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-                        nfs4_state_mark_reclaim_nograce(clp, state);
-                lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+                        nfs4_schedule_stateid_recovery(server, lsp->ls_state);
                 break;
         case -NFS4ERR_STALE_STATEID:
-                if (new_lock_owner != 0 ||
-                    (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-                        nfs4_state_mark_reclaim_reboot(clp, state);
                 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+        case -NFS4ERR_EXPIRED:
+                nfs4_schedule_lease_recovery(server->nfs_client);
         };
 }
@@ -4406,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
                         case -NFS4ERR_EXPIRED:
                         case -NFS4ERR_STALE_CLIENTID:
                         case -NFS4ERR_STALE_STATEID:
+                                nfs4_schedule_lease_recovery(server->nfs_client);
+                                goto out;
                         case -NFS4ERR_BADSESSION:
                         case -NFS4ERR_BADSLOT:
                         case -NFS4ERR_BAD_HIGH_SLOT:
                         case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                         case -NFS4ERR_DEADSESSION:
-                                nfs4_schedule_state_recovery(server->nfs_client);
+                                nfs4_schedule_session_recovery(server->nfs_client->cl_session);
                                 goto out;
                         case -ERESTARTSYS:
                                 /*
@@ -4421,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
                         case -NFS4ERR_ADMIN_REVOKED:
                         case -NFS4ERR_BAD_STATEID:
                         case -NFS4ERR_OPENMODE:
-                                nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+                                nfs4_schedule_stateid_recovery(server, state);
                                 err = 0;
                                 goto out;
                         case -EKEYEXPIRED:
@@ -5028,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp)
         int status;
         unsigned *ptr;
         struct nfs4_session *session = clp->cl_session;
+        long timeout = 0;
+        int err;
         dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
-        status = _nfs4_proc_create_session(clp);
+        do {
+                status = _nfs4_proc_create_session(clp);
+                if (status == -NFS4ERR_DELAY) {
+                        err = nfs4_delay(clp->cl_rpcclient, &timeout);
+                        if (err)
+                                status = err;
+                }
+        } while (status == -NFS4ERR_DELAY);
         if (status)
                 goto out;
@@ -5140,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client
                 rpc_delay(task, NFS4_POLL_RETRY_MAX);
                 return -EAGAIN;
         default:
-                nfs4_schedule_state_recovery(clp);
+                nfs4_schedule_lease_recovery(clp);
         }
         return 0;
 }
@@ -5227,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
         if (IS_ERR(task))
                 ret = PTR_ERR(task);
         else
-                rpc_put_task(task);
+                rpc_put_task_async(task);
         dprintk("<-- %s status=%d\n", __func__, ret);
         return ret;
 }
@@ -5243,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
                 goto out;
         }
         ret = rpc_wait_for_completion_task(task);
-        if (!ret)
+        if (!ret) {
+                struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
+                if (task->tk_status == 0)
+                        nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
                 ret = task->tk_status;
+        }
         rpc_put_task(task);
 out:
         dprintk("<-- %s status=%d\n", __func__, ret);
@@ -5281,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
                 rpc_delay(task, NFS4_POLL_RETRY_MAX);
                 return -EAGAIN;
         default:
-                nfs4_schedule_state_recovery(clp);
+                nfs4_schedule_lease_recovery(clp);
         }
         return 0;
 }
@@ -5349,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
                 status = PTR_ERR(task);
                 goto out;
         }
+        status = nfs4_wait_for_completion_rpc_task(task);
+        if (status == 0)
+                status = task->tk_status;
         rpc_put_task(task);
         return 0;
 out:
...
@@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
 }
 /*
- * Schedule a state recovery attempt
+ * Schedule a lease recovery attempt
  */
-void nfs4_schedule_state_recovery(struct nfs_client *clp)
+void nfs4_schedule_lease_recovery(struct nfs_client *clp)
 {
         if (!clp)
                 return;
@@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
         nfs4_schedule_state_manager(clp);
 }
-int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
 {
         set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st
         return 1;
 }
-int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
+static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
 {
         set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
         clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s
         return 1;
 }
+void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
+{
+        struct nfs_client *clp = server->nfs_client;
+        nfs4_state_mark_reclaim_nograce(clp, state);
+        nfs4_schedule_state_manager(clp);
+}
 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
 {
         struct inode *inode = state->inode;
@@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
 }
 #ifdef CONFIG_NFS_V4_1
+void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+        nfs4_schedule_lease_recovery(session->clp);
+}
 void nfs41_handle_recall_slot(struct nfs_client *clp)
 {
         set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
-        nfs4_schedule_state_recovery(clp);
+        nfs4_schedule_state_manager(clp);
 }
 static void nfs4_reset_all_state(struct nfs_client *clp)
@@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp)
         if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                 clp->cl_boot_time = CURRENT_TIME;
                 nfs4_state_start_reclaim_nograce(clp);
-                nfs4_schedule_state_recovery(clp);
+                nfs4_schedule_state_manager(clp);
         }
 }
@@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
 {
         if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                 nfs4_state_start_reclaim_reboot(clp);
-                nfs4_schedule_state_recovery(clp);
+                nfs4_schedule_state_manager(clp);
         }
 }
@@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp)
 {
         nfs_expire_all_delegations(clp);
         if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
-                nfs4_schedule_state_recovery(clp);
+                nfs4_schedule_state_manager(clp);
 }
 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
...
@@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr,
         p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
         *p++ = cpu_to_be32(OP_CREATE_SESSION);
-        p = xdr_encode_hyper(p, clp->cl_ex_clid);
+        p = xdr_encode_hyper(p, clp->cl_clientid);
         *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
         *p++ = cpu_to_be32(args->flags); /*flags */
@@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
         p = xdr_inline_decode(xdr, 8);
         if (unlikely(!p))
                 goto out_overflow;
-        xdr_decode_hyper(p, &clp->cl_ex_clid);
+        xdr_decode_hyper(p, &clp->cl_clientid);
         p = xdr_inline_decode(xdr, 12);
         if (unlikely(!p))
                 goto out_overflow;
...
@@ -86,11 +86,14 @@
 /* Default path we try to mount. "%s" gets replaced by our IP address */
 #define NFS_ROOT "/tftpboot/%s"
+/* Default NFSROOT mount options. */
+#define NFS_DEF_OPTIONS "udp"
 /* Parameters passed from the kernel command line */
 static char nfs_root_parms[256] __initdata = "";
 /* Text-based mount options passed to super.c */
-static char nfs_root_options[256] __initdata = "";
+static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
 /* Address of NFS server */
 static __be32 servaddr __initdata = htonl(INADDR_NONE);
@@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src,
 }
 static int __init root_nfs_cat(char *dest, const char *src,
                                const size_t destlen)
 {
+        size_t len = strlen(dest);
+        if (len && dest[len - 1] != ',')
+                if (strlcat(dest, ",", destlen) > destlen)
+                        return -1;
         if (strlcat(dest, src, destlen) > destlen)
                 return -1;
         return 0;
@@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
         if (root_nfs_cat(nfs_root_options, incoming,
                                 sizeof(nfs_root_options)))
                 return -1;
-        /*
-         * Possibly prepare for more options to be appended
-         */
-        if (nfs_root_options[0] != '\0' &&
-            nfs_root_options[strlen(nfs_root_options)] != ',')
-                if (root_nfs_cat(nfs_root_options, ",",
-                                sizeof(nfs_root_options)))
-                        return -1;
         return 0;
 }
@@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
  */
 static int __init root_nfs_data(char *cmdline)
 {
-        char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
+        char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
         int len, retval = -1;
         char *tmp = NULL;
         const size_t tmplen = sizeof(nfs_export_path);
@@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline)
          * Append mandatory options for nfsroot so they override
          * what has come before
          */
-        snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4",
+        snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
                  &servaddr);
-        if (root_nfs_cat(nfs_root_options, addr_option,
+        if (root_nfs_cat(nfs_root_options, mand_options,
                          sizeof(nfs_root_options)))
                 goto out_optionstoolong;
...
@@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
         task_setup_data.rpc_client = NFS_CLIENT(dir);
         task = rpc_run_task(&task_setup_data);
         if (!IS_ERR(task))
-                rpc_put_task(task);
+                rpc_put_task_async(task);
         return 1;
 }
...
@@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
         task = rpc_run_task(&task_setup_data);
         if (IS_ERR(task))
                 return PTR_ERR(task);
+        if (how & FLUSH_SYNC)
+                rpc_wait_for_completion_task(task);
         rpc_put_task(task);
         return 0;
 }
...
@@ -68,11 +68,7 @@ struct nfs_client {
         unsigned char cl_id_uniquifier;
         u32 cl_cb_ident; /* v4.0 callback identifier */
         const struct nfs4_minor_version_ops *cl_mvops;
-#endif /* CONFIG_NFS_V4 */
-#ifdef CONFIG_NFS_V4_1
-        /* clientid returned from EXCHANGE_ID, used by session operations */
-        u64 cl_ex_clid;
         /* The sequence id to use for the next CREATE_SESSION */
         u32 cl_seqid;
         /* The flags used for obtaining the clientid during EXCHANGE_ID */
@@ -80,7 +76,7 @@ struct nfs_client {
         struct nfs4_session *cl_session; /* sharred session */
         struct list_head cl_layouts;
         struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
 #ifdef CONFIG_NFS_FSCACHE
         struct fscache_cookie *fscache; /* client index cache cookie */
@@ -185,7 +181,7 @@ struct nfs_server {
 /* maximum number of slots to use */
 #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_NFS_V4)
 /* Sessions */
 #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
@@ -225,5 +221,5 @@ struct nfs4_session {
         struct nfs_client *clp;
 };
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_NFS_V4 */
 #endif
@@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
                                 const struct rpc_call_ops *ops);
 void rpc_put_task(struct rpc_task *);
+void rpc_put_task_async(struct rpc_task *);
 void rpc_exit_task(struct rpc_task *);
 void rpc_exit(struct rpc_task *, int);
 void rpc_release_calldata(const struct rpc_call_ops *, void *);
...
@@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
 {
         __wake_up_common(q, mode, 1, 0, key);
 }
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
...
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
+ * and then waking up all tasks that were sleeping.
  */
-static void rpc_mark_complete_task(struct rpc_task *task)
+static int rpc_complete_task(struct rpc_task *task)
 {
-        smp_mb__before_clear_bit();
+        void *m = &task->tk_runstate;
+        wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
+        struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
+        unsigned long flags;
+        int ret;
+        spin_lock_irqsave(&wq->lock, flags);
         clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
-        smp_mb__after_clear_bit();
-        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+        ret = atomic_dec_and_test(&task->tk_count);
+        if (waitqueue_active(wq))
+                __wake_up_locked_key(wq, TASK_NORMAL, &k);
+        spin_unlock_irqrestore(&wq->lock, flags);
+        return ret;
 }
 /*
  * Allow callers to wait for completion of an RPC call
+ *
+ * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
+ * to enforce taking of the wq->lock and hence avoid races with
+ * rpc_complete_task().
  */
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
         if (action == NULL)
                 action = rpc_wait_bit_killable;
-        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+        return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                         action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
         rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
 }
-void rpc_put_task(struct rpc_task *task)
+static void rpc_release_resources_task(struct rpc_task *task)
 {
-        if (!atomic_dec_and_test(&task->tk_count))
-                return;
-        /* Release resources */
         if (task->tk_rqstp)
                 xprt_release(task);
         if (task->tk_msg.rpc_cred)
                 put_rpccred(task->tk_msg.rpc_cred);
         rpc_task_release_client(task);
-        if (task->tk_workqueue != NULL) {
+}
+static void rpc_final_put_task(struct rpc_task *task,
+                struct workqueue_struct *q)
+{
+        if (q != NULL) {
                 INIT_WORK(&task->u.tk_work, rpc_async_release);
-                queue_work(task->tk_workqueue, &task->u.tk_work);
+                queue_work(q, &task->u.tk_work);
         } else
                 rpc_free_task(task);
 }
+static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
+{
+        if (atomic_dec_and_test(&task->tk_count)) {
+                rpc_release_resources_task(task);
+                rpc_final_put_task(task, q);
+        }
+}
+void rpc_put_task(struct rpc_task *task)
+{
+        rpc_do_put_task(task, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_put_task);
+void rpc_put_task_async(struct rpc_task *task)
+{
+        rpc_do_put_task(task, task->tk_workqueue);
+}
+EXPORT_SYMBOL_GPL(rpc_put_task_async);
 static void rpc_release_task(struct rpc_task *task)
 {
         dprintk("RPC: %5u release task\n", task->tk_pid);
         BUG_ON (RPC_IS_QUEUED(task));
-        /* Wake up anyone who is waiting for task completion */
-        rpc_mark_complete_task(task);
-        rpc_put_task(task);
+        rpc_release_resources_task(task);
+        /*
+         * Note: at this point we have been removed from rpc_clnt->cl_tasks,
+         * so it should be safe to use task->tk_count as a test for whether
+         * or not any other processes still hold references to our rpc_task.
+         */
+        if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
+                /* Wake up anyone who may be waiting for task completion */
+                if (!rpc_complete_task(task))
+                        return;
+        } else {
+                if (!atomic_dec_and_test(&task->tk_count))
+                        return;
+        }
+        rpc_final_put_task(task, task->tk_workqueue);
 }
 int rpciod_up(void)
...
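(Aside on the hunks above: task release is split into rpc_release_resources_task()/rpc_final_put_task(), and rpc_put_task_async() is added so the NFS call sites shown earlier -- nfs4_lock_release(), nfs41_proc_async_sequence(), nfs_do_call_unlink() -- can drop their reference without freeing the task inline. A minimal caller-side usage sketch follows; the function name and the already-populated rpc_task_setup are assumptions made for illustration, not code from this series.)

#include <linux/err.h>
#include <linux/sunrpc/sched.h>

/* Illustrative sketch: fire an async RPC and drop our reference.  If it
 * turns out to be the last reference, the final free is queued on
 * task->tk_workqueue instead of running in this caller's context. */
static int example_fire_and_forget(const struct rpc_task_setup *setup)
{
        struct rpc_task *task;

        task = rpc_run_task(setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task_async(task);
        return 0;
}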
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                             p, 0, length, DMA_FROM_DEVICE);
         if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
                 put_page(p);
+                svc_rdma_put_context(ctxt, 1);
                 return;
         }
         atomic_inc(&xprt->sc_dma_used);
...
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
         }
         xs_reclassify_socket(family, sock);
-        if (xs_bind(transport, sock)) {
+        err = xs_bind(transport, sock);
+        if (err) {
                 sock_release(sock);
                 goto out;
         }
...