Commit cc7889ff authored by Linus Torvalds

Merge branch 'nfs-for-2.6.34' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6

* 'nfs-for-2.6.34' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (44 commits)
  NFS: Remove requirement for inode->i_mutex from nfs_invalidate_mapping
  NFS: Clean up nfs_sync_mapping
  NFS: Simplify nfs_wb_page()
  NFS: Replace __nfs_write_mapping with sync_inode()
  NFS: Simplify nfs_wb_page_cancel()
  NFS: Ensure inode is always marked I_DIRTY_DATASYNC, if it has unstable pages
  NFS: Run COMMIT as an asynchronous RPC call when wbc->for_background is set
  NFS: Reduce the number of unnecessary COMMIT calls
  NFS: Add a count of the number of unstable writes carried by an inode
  NFS: Cleanup - move nfs_write_inode() into fs/nfs/write.c
  nfs41 fix NFS4ERR_CLID_INUSE for exchange id
  NFS: Fix an allocation-under-spinlock bug
  SUNRPC: Handle EINVAL error returns from the TCP connect operation
  NFSv4.1: Various fixes to the sequence flag error handling
  nfs4: renewd renew operations should take/put a client reference
  nfs41: renewd sequence operations should take/put client reference
  nfs: prevent backlogging of renewd requests
  nfs: kill renewd before clearing client minor version
  NFS: Make close(2) asynchronous when closing NFS O_DIRECT files
  NFS: Improve NFS iostat byte count accuracy for writes
  ...
parents b13d3c6e 3fa04ecd
...@@ -119,6 +119,14 @@ struct cb_recallanyargs {
};

extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);

+struct cb_recallslotargs {
+	struct sockaddr	*crsa_addr;
+	uint32_t	crsa_target_max_slots;
+};
+extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
+					 void *dummy);
+
#endif /* CONFIG_NFS_V4_1 */

extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
......
...@@ -143,44 +143,49 @@ int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const n
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
- * We don't yet implement a duplicate request cache, so at this time
- * we will log replays, and process them as if we had not seen them before,
- * but we don't bump the sequence in the slot.  Not too worried about it,
+ * We don't yet implement a duplicate request cache, instead we set the
+ * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
-static int
-validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
+static __be32
+validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
{
	struct nfs4_slot *slot;

	dprintk("%s enter. slotid %d seqid %d\n",
-		__func__, slotid, seqid);
+		__func__, args->csa_slotid, args->csa_sequenceid);

-	if (slotid > NFS41_BC_MAX_CALLBACKS)
+	if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
		return htonl(NFS4ERR_BADSLOT);

-	slot = tbl->slots + slotid;
+	slot = tbl->slots + args->csa_slotid;
	dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);

	/* Normal */
-	if (likely(seqid == slot->seq_nr + 1)) {
+	if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
		slot->seq_nr++;
		return htonl(NFS4_OK);
	}

	/* Replay */
-	if (seqid == slot->seq_nr) {
-		dprintk("%s seqid %d is a replay - no DRC available\n",
-			__func__, seqid);
-		return htonl(NFS4_OK);
+	if (args->csa_sequenceid == slot->seq_nr) {
+		dprintk("%s seqid %d is a replay\n",
+			__func__, args->csa_sequenceid);
+		/* Signal process_op to set this error on next op */
+		if (args->csa_cachethis == 0)
+			return htonl(NFS4ERR_RETRY_UNCACHED_REP);
+		/* The ca_maxresponsesize_cached is 0 with no DRC */
+		else if (args->csa_cachethis == 1)
+			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
	}

	/* Wraparound */
-	if (seqid == 1 && (slot->seq_nr + 1) == 0) {
+	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
		slot->seq_nr = 1;
		return htonl(NFS4_OK);
	}
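A note on the wraparound branch above: slot sequence numbers are plain u32 values, so "one more than what we last saw" is modular arithmetic, and once the counter reaches 0xffffffff the next valid sequence ID restarts at 1 (0 is skipped). A standalone sketch of the same rule, using illustrative names rather than kernel code:

#include <assert.h>
#include <stdint.h>

/* Mirror of the checks above: normal increment is modular u32
 * arithmetic; after seq_nr hits UINT32_MAX the next valid ID is 1. */
static int seqid_is_next(uint32_t seq_nr, uint32_t seqid)
{
	if (seqid == seq_nr + 1)		/* normal case, mod 2^32 */
		return 1;
	if (seqid == 1 && seq_nr + 1 == 0)	/* wrap: UINT32_MAX -> 1 */
		return 1;
	return 0;
}

int main(void)
{
	assert(seqid_is_next(5, 6));
	assert(!seqid_is_next(5, 7));
	assert(seqid_is_next(UINT32_MAX, 1));	/* wraparound case */
	return 0;
}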
...@@ -225,27 +230,87 @@ validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
	return NULL;
}

-/* FIXME: referring calls should be processed */
-unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+/*
+ * For each referring call triple, check the session's slot table for
+ * a match.  If the slot is in use and the sequence numbers match, the
+ * client is still waiting for a response to the original request.
+ */
+static bool referring_call_exists(struct nfs_client *clp,
+				  uint32_t nrclists,
+				  struct referring_call_list *rclists)
+{
+	bool status = 0;
+	int i, j;
+	struct nfs4_session *session;
+	struct nfs4_slot_table *tbl;
+	struct referring_call_list *rclist;
+	struct referring_call *ref;
+
+	/*
+	 * XXX When client trunking is implemented, this becomes
+	 * a session lookup from within the loop
+	 */
+	session = clp->cl_session;
+	tbl = &session->fc_slot_table;
+
+	for (i = 0; i < nrclists; i++) {
+		rclist = &rclists[i];
+		if (memcmp(session->sess_id.data,
+			   rclist->rcl_sessionid.data,
+			   NFS4_MAX_SESSIONID_LEN) != 0)
+			continue;
+
+		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
+			ref = &rclist->rcl_refcalls[j];
+
+			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
+				"slotid %u\n", __func__,
+				((u32 *)&rclist->rcl_sessionid.data)[0],
+				((u32 *)&rclist->rcl_sessionid.data)[1],
+				((u32 *)&rclist->rcl_sessionid.data)[2],
+				((u32 *)&rclist->rcl_sessionid.data)[3],
+				ref->rc_sequenceid, ref->rc_slotid);
+
+			spin_lock(&tbl->slot_tbl_lock);
+			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
+				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
+			spin_unlock(&tbl->slot_tbl_lock);
+			if (status)
+				goto out;
+		}
+	}
+
+out:
+	return status;
+}
+__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			struct cb_sequenceres *res)
{
	struct nfs_client *clp;
-	int i, status;
+	int i;
+	__be32 status;

-	for (i = 0; i < args->csa_nrclists; i++)
-		kfree(args->csa_rclists[i].rcl_refcalls);
-	kfree(args->csa_rclists);

	status = htonl(NFS4ERR_BADSESSION);
	clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
	if (clp == NULL)
		goto out;

-	status = validate_seqid(&clp->cl_session->bc_slot_table,
-			args->csa_slotid, args->csa_sequenceid);
+	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
	if (status)
		goto out_putclient;

+	/*
+	 * Check for pending referring calls.  If a match is found, a
+	 * related callback was received before the response to the original
+	 * call.
+	 */
+	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
+		status = htonl(NFS4ERR_DELAY);
+		goto out_putclient;
+	}
+
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
			sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
...@@ -256,15 +321,23 @@ unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
out_putclient:
	nfs_put_client(clp);
out:
-	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
-	res->csr_status = status;
-	return res->csr_status;
+	for (i = 0; i < args->csa_nrclists; i++)
+		kfree(args->csa_rclists[i].rcl_refcalls);
+	kfree(args->csa_rclists);
+
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
+		res->csr_status = 0;
+	else
+		res->csr_status = status;
+
+	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
+		ntohl(status), ntohl(res->csr_status));
+	return status;
}
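Throughout this series the callback status values change type from int/unsigned to __be32: a status is produced with htonl() once and kept in network byte order until it is encoded, so tests compare against htonl(NFS4ERR_...) constants (as in the NFS4ERR_RETRY_UNCACHED_REP check above) instead of byte-swapping the status each time. A standalone illustration; the numeric error value is taken from RFC 5661:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

#define NFS4ERR_RETRY_UNCACHED_REP 10068	/* per RFC 5661 */

int main(void)
{
	/* Status travels in big-endian form end to end; compare against
	 * htonl() of the constant rather than converting the status. */
	uint32_t status = htonl(NFS4ERR_RETRY_UNCACHED_REP);

	assert(status == htonl(NFS4ERR_RETRY_UNCACHED_REP));
	assert(ntohl(status) == NFS4ERR_RETRY_UNCACHED_REP);
	return 0;
}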
-unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
{
	struct nfs_client *clp;
-	int status;
+	__be32 status;
	fmode_t flags = 0;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
...@@ -289,4 +362,40 @@ unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
+/* Reduce the fore channel's max_slots to the target value */
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+{
+	struct nfs_client *clp;
+	struct nfs4_slot_table *fc_tbl;
+	__be32 status;
+
+	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	clp = nfs_find_client(args->crsa_addr, 4);
+	if (clp == NULL)
+		goto out;
+
+	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
+		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+		args->crsa_target_max_slots);
+
+	fc_tbl = &clp->cl_session->fc_slot_table;
+
+	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
+	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
+	    args->crsa_target_max_slots < 1)
+		goto out_putclient;
+
+	status = htonl(NFS4_OK);
+	if (args->crsa_target_max_slots == fc_tbl->max_slots)
+		goto out_putclient;
+
+	fc_tbl->target_max_slots = args->crsa_target_max_slots;
+	nfs41_handle_recall_slot(clp);
+out_putclient:
+	nfs_put_client(clp);	/* balance nfs_find_client */
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
#endif /* CONFIG_NFS_V4_1 */
...@@ -24,10 +24,14 @@
#define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
					4 + 1 + 3)
#define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_RECALLSLOT_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */

#define NFSDBG_FACILITY NFSDBG_CALLBACK

+/* Internal error code */
+#define NFS4ERR_RESOURCE_HDR	11050
+
typedef __be32 (*callback_process_op_t)(void *, void *);
typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
...@@ -173,7 +177,7 @@ static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
	__be32 *p;

	p = read_buf(xdr, 4);
	if (unlikely(p == NULL))
-		return htonl(NFS4ERR_RESOURCE);
+		return htonl(NFS4ERR_RESOURCE_HDR);
	*op = ntohl(*p);
	return 0;
}
...@@ -215,10 +219,10 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr,
#if defined(CONFIG_NFS_V4_1)

-static unsigned decode_sessionid(struct xdr_stream *xdr,
+static __be32 decode_sessionid(struct xdr_stream *xdr,
				 struct nfs4_sessionid *sid)
{
-	uint32_t *p;
+	__be32 *p;
	int len = NFS4_MAX_SESSIONID_LEN;

	p = read_buf(xdr, len);
...@@ -229,12 +233,12 @@ static unsigned decode_sessionid(struct xdr_stream *xdr,
	return 0;
}

-static unsigned decode_rc_list(struct xdr_stream *xdr,
+static __be32 decode_rc_list(struct xdr_stream *xdr,
			       struct referring_call_list *rc_list)
{
-	uint32_t *p;
+	__be32 *p;
	int i;
-	unsigned status;
+	__be32 status;

	status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
	if (status)
...@@ -267,13 +271,13 @@ static unsigned decode_rc_list(struct xdr_stream *xdr,
	return status;
}
-static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
+static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
					struct xdr_stream *xdr,
					struct cb_sequenceargs *args)
{
-	uint32_t *p;
+	__be32 *p;
	int i;
-	unsigned status;
+	__be32 status;

	status = decode_sessionid(xdr, &args->csa_sessionid);
	if (status)
...@@ -327,11 +331,11 @@ static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
	goto out;
}

-static unsigned decode_recallany_args(struct svc_rqst *rqstp,
+static __be32 decode_recallany_args(struct svc_rqst *rqstp,
				      struct xdr_stream *xdr,
				      struct cb_recallanyargs *args)
{
-	uint32_t *p;
+	__be32 *p;

	args->craa_addr = svc_addr(rqstp);
	p = read_buf(xdr, 4);
...@@ -346,6 +350,20 @@ static unsigned decode_recallany_args(struct svc_rqst *rqstp,
	return 0;
}

+static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
+				     struct xdr_stream *xdr,
+				     struct cb_recallslotargs *args)
+{
+	__be32 *p;
+
+	args->crsa_addr = svc_addr(rqstp);
+	p = read_buf(xdr, 4);
+	if (unlikely(p == NULL))
+		return htonl(NFS4ERR_BADXDR);
+	args->crsa_target_max_slots = ntohl(*p++);
+	return 0;
+}
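decode_recallslot_args() above is a typical XDR decode: pull four bytes off the stream and byte-swap the big-endian quad into host order. A standalone sketch of that one step, with an illustrative byte buffer standing in for the kernel's xdr_stream:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Standalone sketch of decoding one XDR quad (big-endian u32), the
 * same operation the kernel does via read_buf() + ntohl(). */
static uint32_t xdr_decode_u32(const unsigned char *buf)
{
	uint32_t be;

	memcpy(&be, buf, sizeof(be));	/* buf may be unaligned */
	return ntohl(be);
}

int main(void)
{
	/* 0x00000005 on the wire, most significant byte first */
	const unsigned char wire[4] = { 0x00, 0x00, 0x00, 0x05 };

	assert(xdr_decode_u32(wire) == 5);
	return 0;
}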
#endif /* CONFIG_NFS_V4_1 */

static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
...@@ -465,7 +483,7 @@ static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)

	p = xdr_reserve_space(xdr, 8);
	if (unlikely(p == NULL))
-		return htonl(NFS4ERR_RESOURCE);
+		return htonl(NFS4ERR_RESOURCE_HDR);
	*p++ = htonl(op);
	*p = res;
	return 0;
...@@ -499,10 +517,10 @@ static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,

#if defined(CONFIG_NFS_V4_1)

-static unsigned encode_sessionid(struct xdr_stream *xdr,
+static __be32 encode_sessionid(struct xdr_stream *xdr,
				 const struct nfs4_sessionid *sid)
{
-	uint32_t *p;
+	__be32 *p;
	int len = NFS4_MAX_SESSIONID_LEN;

	p = xdr_reserve_space(xdr, len);
...@@ -513,11 +531,11 @@ static unsigned encode_sessionid(struct xdr_stream *xdr,
	return 0;
}

-static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
+static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
				       struct xdr_stream *xdr,
				       const struct cb_sequenceres *res)
{
-	uint32_t *p;
+	__be32 *p;
	unsigned status = res->csr_status;

	if (unlikely(status != 0))
...@@ -554,6 +572,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
	case OP_CB_RECALL:
	case OP_CB_SEQUENCE:
	case OP_CB_RECALL_ANY:
+	case OP_CB_RECALL_SLOT:
		*op = &callback_ops[op_nr];
		break;
...@@ -562,7 +581,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
	case OP_CB_NOTIFY:
	case OP_CB_PUSH_DELEG:
	case OP_CB_RECALLABLE_OBJ_AVAIL:
-	case OP_CB_RECALL_SLOT:
	case OP_CB_WANTS_CANCELLED:
	case OP_CB_NOTIFY_LOCK:
		return htonl(NFS4ERR_NOTSUPP);
...@@ -602,20 +620,18 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
static __be32 process_op(uint32_t minorversion, int nop,
		struct svc_rqst *rqstp,
		struct xdr_stream *xdr_in, void *argp,
-		struct xdr_stream *xdr_out, void *resp)
+		struct xdr_stream *xdr_out, void *resp, int* drc_status)
{
	struct callback_op *op = &callback_ops[0];
-	unsigned int op_nr = OP_CB_ILLEGAL;
+	unsigned int op_nr;
	__be32 status;
	long maxlen;
	__be32 res;

	dprintk("%s: start\n", __func__);
	status = decode_op_hdr(xdr_in, &op_nr);
-	if (unlikely(status)) {
-		status = htonl(NFS4ERR_OP_ILLEGAL);
-		goto out;
-	}
+	if (unlikely(status))
+		return status;

	dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
		__func__, minorversion, nop, op_nr);
...@@ -624,19 +640,32 @@ static __be32 process_op(uint32_t minorversion, int nop,
		preprocess_nfs4_op(op_nr, &op);
	if (status == htonl(NFS4ERR_OP_ILLEGAL))
		op_nr = OP_CB_ILLEGAL;
-out:
+	if (status)
+		goto encode_hdr;
+
+	if (*drc_status) {
+		status = *drc_status;
+		goto encode_hdr;
+	}
+
	maxlen = xdr_out->end - xdr_out->p;
	if (maxlen > 0 && maxlen < PAGE_SIZE) {
-		if (likely(status == 0 && op->decode_args != NULL))
-			status = op->decode_args(rqstp, xdr_in, argp);
-		if (likely(status == 0 && op->process_op != NULL))
+		status = op->decode_args(rqstp, xdr_in, argp);
+		if (likely(status == 0))
			status = op->process_op(argp, resp);
	} else
		status = htonl(NFS4ERR_RESOURCE);

+	/* Only set by OP_CB_SEQUENCE processing */
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+		*drc_status = status;
+		status = 0;
+	}
+
+encode_hdr:
	res = encode_op_hdr(xdr_out, op_nr, status);
-	if (status == 0)
-		status = res;
+	if (unlikely(res))
+		return res;
	if (op->encode_res != NULL && status == 0)
		status = op->encode_res(rqstp, xdr_out, resp);
	dprintk("%s: done, status = %d\n", __func__, ntohl(status));
...@@ -652,7 +681,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
	struct cb_compound_hdr_res hdr_res = { NULL };
	struct xdr_stream xdr_in, xdr_out;
	__be32 *p;
-	__be32 status;
+	__be32 status, drc_status = 0;
	unsigned int nops = 0;

	dprintk("%s: start\n", __func__);
...@@ -672,11 +701,18 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
		return rpc_system_err;

	while (status == 0 && nops != hdr_arg.nops) {
-		status = process_op(hdr_arg.minorversion, nops,
-				rqstp, &xdr_in, argp, &xdr_out, resp);
+		status = process_op(hdr_arg.minorversion, nops, rqstp,
+				&xdr_in, argp, &xdr_out, resp, &drc_status);
		nops++;
	}

+	/* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return
+	 * resource error in cb_compound status without returning op */
+	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
+		status = htonl(NFS4ERR_RESOURCE);
+		nops--;
+	}
+
	*hdr_res.status = status;
	*hdr_res.nops = htonl(nops);
	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
...@@ -713,6 +749,11 @@ static struct callback_op callback_ops[] = {
		.decode_args = (callback_decode_arg_t)decode_recallany_args,
		.res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
	},
+	[OP_CB_RECALL_SLOT] = {
+		.process_op = (callback_process_op_t)nfs4_callback_recallslot,
+		.decode_args = (callback_decode_arg_t)decode_recallslot_args,
+		.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
+	},
#endif /* CONFIG_NFS_V4_1 */
};
......
...@@ -164,30 +164,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
	return ERR_PTR(err);
}

-static void nfs4_shutdown_client(struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4
-	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
-		nfs4_kill_renewd(clp);
-	BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
-	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
-		nfs_idmap_delete(clp);
-	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
-#endif
-}
-
-/*
- * Destroy the NFS4 callback service
- */
-static void nfs4_destroy_callback(struct nfs_client *clp)
-{
#ifdef CONFIG_NFS_V4
-	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
-		nfs_callback_down(clp->cl_minorversion);
-#endif /* CONFIG_NFS_V4 */
-}
-
/*
 * Clears/puts all minor version specific parts from an nfs_client struct
 * reverting it to minorversion 0.
...@@ -202,9 +179,33 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
	clp->cl_call_sync = _nfs4_call_sync;
#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+		nfs_callback_down(clp->cl_minorversion);
+}
+
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
+		nfs4_kill_renewd(clp);
+	nfs4_clear_client_minor_version(clp);
	nfs4_destroy_callback(clp);
+	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
+		nfs_idmap_delete(clp);
+	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
}
+#else
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4 */

/*
 * Destroy a shared client record
...@@ -213,7 +214,6 @@ static void nfs_free_client(struct nfs_client *clp)
{
	dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);

-	nfs4_clear_client_minor_version(clp);
	nfs4_shutdown_client(clp);
	nfs_fscache_release_client_cookie(clp);
......
...@@ -560,7 +560,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
	desc->entry = &my_entry;

	nfs_block_sillyrename(dentry);
-	res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
+	res = nfs_revalidate_mapping(inode, filp->f_mapping);
	if (res < 0)
		goto out;
......
...@@ -36,6 +36,19 @@ struct nfs_dns_ent {
};

+static void nfs_dns_ent_update(struct cache_head *cnew,
+		struct cache_head *ckey)
+{
+	struct nfs_dns_ent *new;
+	struct nfs_dns_ent *key;
+
+	new = container_of(cnew, struct nfs_dns_ent, h);
+	key = container_of(ckey, struct nfs_dns_ent, h);
+
+	memcpy(&new->addr, &key->addr, key->addrlen);
+	new->addrlen = key->addrlen;
+}
+
static void nfs_dns_ent_init(struct cache_head *cnew,
		struct cache_head *ckey)
{
...@@ -49,8 +62,7 @@ static void nfs_dns_ent_init(struct cache_head *cnew,
	new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
	if (new->hostname) {
		new->namelen = key->namelen;
-		memcpy(&new->addr, &key->addr, key->addrlen);
-		new->addrlen = key->addrlen;
+		nfs_dns_ent_update(cnew, ckey);
	} else {
		new->namelen = 0;
		new->addrlen = 0;
...@@ -234,7 +246,7 @@ static struct cache_detail nfs_dns_resolve = {
	.cache_show = nfs_dns_show,
	.match = nfs_dns_match,
	.init = nfs_dns_ent_init,
-	.update = nfs_dns_ent_init,
+	.update = nfs_dns_ent_update,
	.alloc = nfs_dns_ent_alloc,
};
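The bug this hunk fixes: the cache's .update hook used to point at nfs_dns_ent_init, so every refresh of an existing entry re-ran kstrndup() on the hostname and leaked the previous copy. .init runs once on a fresh entry and copies the immutable key; .update only refreshes the volatile address. A userspace analogue of the split, with simplified stand-in types:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of the init/update split: init copies the
 * immutable key (hostname); update refreshes only the volatile value
 * (address). Reusing init as update would strdup() the name again on
 * every refresh and leak the old copy. */
struct dns_ent {
	char *hostname;	/* fixed for the entry's lifetime */
	int   addr;	/* refreshed by updates */
};

static void ent_update(struct dns_ent *ent, const struct dns_ent *key)
{
	ent->addr = key->addr;
}

static void ent_init(struct dns_ent *ent, const struct dns_ent *key)
{
	ent->hostname = strdup(key->hostname);
	ent_update(ent, key);
}

int main(void)
{
	struct dns_ent key = { "fileserver", 1 }, ent = { 0 };

	ent_init(&ent, &key);
	key.addr = 2;
	ent_update(&ent, &key);	/* no second strdup(), no leak */
	assert(ent.addr == 2 && strcmp(ent.hostname, "fileserver") == 0);
	free(ent.hostname);
	return 0;
}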
......
...@@ -123,11 +123,11 @@ nfs_file_open(struct inode *inode, struct file *filp)
			filp->f_path.dentry->d_parent->d_name.name,
			filp->f_path.dentry->d_name.name);

-	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

+	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	res = nfs_open(inode, filp);
	return res;
}
...@@ -237,9 +237,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
			dentry->d_parent->d_name.name,
			dentry->d_name.name);

-	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

+	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	/* Flush writes to the server and return any errors */
	return nfs_do_fsync(ctx, inode);
...@@ -262,9 +262,11 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
		(unsigned long) count, (unsigned long) pos);

	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
-	nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, count);
-	if (!result)
+	if (!result) {
		result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+		if (result > 0)
+			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
+	}
	return result;
}
...@@ -282,8 +284,11 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
		(unsigned long) count, (unsigned long long) *ppos);

	res = nfs_revalidate_mapping(inode, filp->f_mapping);
-	if (!res)
+	if (!res) {
		res = generic_file_splice_read(filp, ppos, pipe, count, flags);
+		if (res > 0)
+			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
+	}
	return res;
}
...@@ -596,6 +601,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
{
	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
	struct inode * inode = dentry->d_inode;
+	unsigned long written = 0;
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);
...@@ -622,14 +628,18 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
	if (!count)
		goto out;

-	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+	if (result > 0)
+		written = result;
+
	/* Return error values for O_DSYNC and IS_SYNC() */
	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
		if (err < 0)
			result = err;
	}
+	if (result > 0)
+		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
	return result;
...@@ -644,6 +654,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
+	unsigned long written = 0;
	ssize_t ret;

	dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
...@@ -654,14 +665,17 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
	 * The combination of splice and an O_APPEND destination is disallowed.
	 */

-	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
	ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+	if (ret > 0)
+		written = ret;
+
	if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
		int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
		if (err < 0)
			ret = err;
	}
+	if (ret > 0)
+		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
	return ret;
}
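The pattern in both write paths above: latch the byte count in `written` while `result` still holds the return value of the write, because the O_DSYNC fsync that follows may replace `result` with an error; iostat bytes are then accounted only when the whole operation succeeded. A standalone sketch:

#include <assert.h>

/* Standalone sketch of the accounting pattern: latch the byte count
 * before a following fsync can overwrite the return value with an
 * error, then account only on overall success. */
static long stats_bytes;

static long write_and_account(long write_result, int fsync_err)
{
	unsigned long written = 0;
	long result = write_result;

	if (result > 0)
		written = result;	/* latch before fsync may clobber */
	if (result >= 0 && fsync_err < 0)
		result = fsync_err;
	if (result > 0)
		stats_bytes += written;	/* account actual, successful bytes */
	return result;
}

int main(void)
{
	assert(write_and_account(4096, 0) == 4096 && stats_bytes == 4096);
	assert(write_and_account(4096, -5) == -5 && stats_bytes == 4096);
	return 0;
}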
......
...@@ -97,18 +97,6 @@ u64 nfs_compat_user_ino64(u64 fileid)
	return ino;
}

-int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	int ret;
-
-	ret = nfs_commit_inode(inode,
-			wbc->sync_mode == WB_SYNC_ALL ? FLUSH_SYNC : 0);
-	if (ret >= 0)
-		return 0;
-	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-	return ret;
-}
-
void nfs_clear_inode(struct inode *inode)
{
	/*
...@@ -126,16 +114,12 @@ void nfs_clear_inode(struct inode *inode)
 */
int nfs_sync_mapping(struct address_space *mapping)
{
-	int ret;
+	int ret = 0;

-	if (mapping->nrpages == 0)
-		return 0;
-	unmap_mapping_range(mapping, 0, 0, 0);
-	ret = filemap_write_and_wait(mapping);
-	if (ret != 0)
-		goto out;
-	ret = nfs_wb_all(mapping->host);
-out:
+	if (mapping->nrpages != 0) {
+		unmap_mapping_range(mapping, 0, 0, 0);
+		ret = nfs_wb_all(mapping->host);
+	}
	return ret;
}
...@@ -507,17 +491,11 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
	int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
	int err;

-	/*
-	 * Flush out writes to the server in order to update c/mtime.
-	 *
-	 * Hold the i_mutex to suspend application writes temporarily;
-	 * this prevents long-running writing applications from blocking
-	 * nfs_wb_nocommit.
-	 */
+	/* Flush out writes to the server in order to update c/mtime. */
	if (S_ISREG(inode->i_mode)) {
-		mutex_lock(&inode->i_mutex);
-		nfs_wb_nocommit(inode);
-		mutex_unlock(&inode->i_mutex);
+		err = filemap_write_and_wait(inode->i_mapping);
+		if (err)
+			goto out;
	}

	/*
...@@ -541,6 +519,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
		generic_fillattr(inode, stat);
		stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
	}
+out:
	return err;
}
...@@ -616,11 +595,6 @@ void put_nfs_open_context(struct nfs_open_context *ctx)
	__put_nfs_open_context(ctx, 0);
}

-static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
-{
-	__put_nfs_open_context(ctx, 1);
-}
-
/*
 * Ensure that mmap has a recent RPC credential for use when writing out
 * shared pages
...@@ -667,7 +641,7 @@ static void nfs_file_clear_open_context(struct file *filp)
		spin_lock(&inode->i_lock);
		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
		spin_unlock(&inode->i_lock);
-		put_nfs_open_context_sync(ctx);
+		__put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1);
	}
}
...@@ -775,7 +749,7 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
	return __nfs_revalidate_inode(server, inode);
}

-static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
+static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
	struct nfs_inode *nfsi = NFS_I(inode);
...@@ -796,49 +770,10 @@ static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_spa
	return 0;
}

-static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
-{
-	int ret = 0;
-
-	mutex_lock(&inode->i_mutex);
-	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_DATA) {
-		ret = nfs_sync_mapping(mapping);
-		if (ret == 0)
-			ret = nfs_invalidate_mapping_nolock(inode, mapping);
-	}
-	mutex_unlock(&inode->i_mutex);
-	return ret;
-}
-
-/**
- * nfs_revalidate_mapping_nolock - Revalidate the pagecache
- * @inode - pointer to host inode
- * @mapping - pointer to mapping
- */
-int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	int ret = 0;
-
-	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-			|| nfs_attribute_timeout(inode) || NFS_STALE(inode)) {
-		ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
-		if (ret < 0)
-			goto out;
-	}
-	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-		ret = nfs_invalidate_mapping_nolock(inode, mapping);
-out:
-	return ret;
-}
-
/**
 * nfs_revalidate_mapping - Revalidate the pagecache
 * @inode - pointer to host inode
 * @mapping - pointer to mapping
- *
- * This version of the function will take the inode->i_mutex and attempt to
- * flush out all dirty data if it needs to invalidate the page cache.
 */
int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
{
...@@ -1416,6 +1351,7 @@ static void init_once(void *foo)
	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
	INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
	nfsi->npages = 0;
+	nfsi->ncommit = 0;
	atomic_set(&nfsi->silly_count, 1);
	INIT_HLIST_HEAD(&nfsi->silly_list);
	init_waitqueue_head(&nfsi->waitqueue);
......
...@@ -22,14 +22,14 @@
#define NFSDBG_FACILITY		NFSDBG_PROC

-/* A wrapper to handle the EJUKEBOX error message */
+/* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error messages */
static int
nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	int res;
	do {
		res = rpc_call_sync(clnt, msg, flags);
-		if (res != -EJUKEBOX)
+		if (res != -EJUKEBOX && res != -EKEYEXPIRED)
			break;
		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
		res = -ERESTARTSYS;
...@@ -42,9 +42,10 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
static int
nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
{
-	if (task->tk_status != -EJUKEBOX)
+	if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
		return 0;
-	nfs_inc_stats(inode, NFSIOS_DELAY);
+	if (task->tk_status == -EJUKEBOX)
+		nfs_inc_stats(inode, NFSIOS_DELAY);
	task->tk_status = 0;
	rpc_restart_call(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
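Both wrappers now treat -EKEYEXPIRED (the RPC credential, e.g. a Kerberos ticket, has expired) exactly like -EJUKEBOX: wait and retry instead of failing the operation, though only the jukebox case is counted as an NFSIOS_DELAY event. A standalone sketch of the retry shape, with stand-in error codes and a fake call:

#include <assert.h>

/* Stand-in error codes; in the kernel these are EJUKEBOX and EKEYEXPIRED. */
#define ERR_JUKEBOX	-1
#define ERR_KEYEXPIRED	-2

static int attempts;

/* Fake RPC: fails transiently twice, then succeeds. */
static int fake_call(void)
{
	return (++attempts < 3) ? ERR_JUKEBOX : 0;
}

/* Sketch of the nfs3_rpc_wrapper shape: back off and retry on
 * transient errors, return anything else to the caller. */
static int retry_transient(int (*call)(void))
{
	int res;

	do {
		res = call();
		if (res != ERR_JUKEBOX && res != ERR_KEYEXPIRED)
			break;
		/* here the kernel sleeps (schedule_timeout_killable) */
	} while (1);
	return res;
}

int main(void)
{
	assert(retry_transient(fake_call) == 0 && attempts == 3);
	return 0;
}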
......
...@@ -46,6 +46,7 @@ enum nfs4_client_state {
	NFS4CLNT_DELEGRETURN,
	NFS4CLNT_SESSION_RESET,
	NFS4CLNT_SESSION_DRAINING,
+	NFS4CLNT_RECALL_SLOT,
};

/*
...@@ -280,6 +281,7 @@ extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
+extern void nfs41_handle_recall_slot(struct nfs_client *clp);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
......
...@@ -281,6 +281,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
		}
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
...@@ -418,7 +419,8 @@ static void nfs41_sequence_done(struct nfs_client *clp,
		clp->cl_last_renewal = timestamp;
		spin_unlock(&clp->cl_lock);
		/* Check sequence flags */
-		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+		if (atomic_read(&clp->cl_count) > 1)
+			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
	}
out:
	/* The session may be reset by one of the error handlers. */
...@@ -1163,7 +1165,7 @@ static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
-		if (err != -NFS4ERR_DELAY)
+		if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
...@@ -1582,6 +1584,7 @@ static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
	}
...@@ -3145,10 +3148,19 @@ static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_messa
 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
 * standalone procedure for queueing an asynchronous RENEW.
 */
+static void nfs4_renew_release(void *data)
+{
+	struct nfs_client *clp = data;
+
+	if (atomic_read(&clp->cl_count) > 1)
+		nfs4_schedule_state_renewal(clp);
+	nfs_put_client(clp);
+}
+
static void nfs4_renew_done(struct rpc_task *task, void *data)
{
-	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;
-	unsigned long timestamp = (unsigned long)data;
+	struct nfs_client *clp = data;
+	unsigned long timestamp = task->tk_start;

	if (task->tk_status < 0) {
		/* Unless we're shutting down, schedule state recovery! */
...@@ -3164,6 +3176,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
+	.rpc_release = nfs4_renew_release,
};

int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
...@@ -3174,8 +3187,10 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
		.rpc_cred	= cred,
	};

+	if (!atomic_inc_not_zero(&clp->cl_count))
+		return -EIO;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			&nfs4_renew_ops, (void *)jiffies);
+			&nfs4_renew_ops, clp);
}
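The renew path now pins the nfs_client before launching the async RPC: atomic_inc_not_zero() refuses to take a reference once the count has dropped to zero (the client is being torn down), and the matching nfs_put_client() happens in the task's rpc_release callback, nfs4_renew_release(). A userspace analogue of the take/put pattern using C11 atomics:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Userspace analogue of the take/put pattern around the async RENEW:
 * pin the object before the async work starts, unpin when it is
 * released. inc_not_zero refuses to resurrect a dying object. */
struct client { atomic_int count; };

static bool get_client(struct client *c)
{
	int old = atomic_load(&c->count);

	while (old != 0)	/* don't pin a dying object */
		if (atomic_compare_exchange_weak(&c->count, &old, old + 1))
			return true;
	return false;
}

static void put_client(struct client *c)
{
	atomic_fetch_sub(&c->count, 1);	/* real code frees at zero */
}

int main(void)
{
	struct client c = { 1 };

	assert(get_client(&c));		/* pin for the async task */
	put_client(&c);			/* rpc_release equivalent */
	put_client(&c);			/* last reference gone */
	assert(!get_client(&c));	/* too late to pin */
	return 0;
}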
int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
...@@ -3452,6 +3467,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
			if (server)
				nfs_inc_server_stats(server, NFSIOS_DELAY);
		case -NFS4ERR_GRACE:
+		case -EKEYEXPIRED:
			rpc_delay(task, NFS4_POLL_RETRY_MAX);
			task->tk_status = 0;
			return -EAGAIN;
...@@ -3564,6 +3580,7 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
			case -NFS4ERR_RESOURCE:
				/* The IBM lawyers misread another document! */
			case -NFS4ERR_DELAY:
+			case -EKEYEXPIRED:
				err = nfs4_delay(clp->cl_rpcclient, &timeout);
		}
	} while (err == 0);
...@@ -4179,7 +4196,7 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
-		if (err != -NFS4ERR_DELAY)
+		if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
...@@ -4204,6 +4221,7 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
	}
...@@ -4355,6 +4373,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
			err = 0;
			goto out;
		case -NFS4ERR_DELAY:
+		case -EKEYEXPIRED:
			break;
	}
	err = nfs4_handle_exception(server, err, &exception);
...@@ -4500,7 +4519,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)

		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);

-		if (status != NFS4ERR_CLID_INUSE)
+		if (status != -NFS4ERR_CLID_INUSE)
			break;

		if (signalled())
...@@ -4554,6 +4573,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
+	case -EKEYEXPIRED:
		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
		rpc_delay(task, NFS4_POLL_RETRY_MIN);
		task->tk_status = 0;
...@@ -4611,26 +4631,32 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
/*
 * Reset a slot table
 */
-static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
-		int old_max_slots, int ivalue)
+static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
+				 int ivalue)
{
+	struct nfs4_slot *new = NULL;
	int i;
	int ret = 0;

-	dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
+	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
+		max_reqs, tbl->max_slots);

-	/*
-	 * Until we have dynamic slot table adjustment, insist
-	 * upon the same slot table size
-	 */
-	if (max_slots != old_max_slots) {
-		dprintk("%s reset slot table does't match old\n",
-			__func__);
-		ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? */
-		goto out;
+	/* Does the newly negotiated max_reqs match the existing slot table? */
+	if (max_reqs != tbl->max_slots) {
+		ret = -ENOMEM;
+		new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
+			      GFP_KERNEL);
+		if (!new)
+			goto out;
+		ret = 0;
+		kfree(tbl->slots);
	}
	spin_lock(&tbl->slot_tbl_lock);
-	for (i = 0; i < max_slots; ++i)
+	if (new) {
+		tbl->slots = new;
+		tbl->max_slots = max_reqs;
+	}
+	for (i = 0; i < tbl->max_slots; ++i)
		tbl->slots[i].seq_nr = ivalue;
	spin_unlock(&tbl->slot_tbl_lock);
	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
...@@ -4648,16 +4674,12 @@ static int nfs4_reset_slot_tables(struct nfs4_session *session)
	int status;

	status = nfs4_reset_slot_table(&session->fc_slot_table,
-			session->fc_attrs.max_reqs,
-			session->fc_slot_table.max_slots,
-			1);
+			session->fc_attrs.max_reqs, 1);
	if (status)
		return status;

	status = nfs4_reset_slot_table(&session->bc_slot_table,
-			session->bc_attrs.max_reqs,
-			session->bc_slot_table.max_slots,
-			0);
+			session->bc_attrs.max_reqs, 0);
	return status;
}
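This rewrite is the fix for the "allocation-under-spinlock" bug in the shortlog: kmalloc(GFP_KERNEL) may sleep, so the new slot array is allocated (and the old one freed) before slot_tbl_lock is taken, and only the pointer/size update plus the seq_nr reset happen under the lock. A userspace analogue with a mutex in place of the spinlock:

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

/* Userspace analogue of the resize in nfs4_reset_slot_table: do the
 * (possibly blocking) allocation outside the lock, publish the new
 * array with a quick pointer swap inside it. */
struct slot_table {
	pthread_mutex_t lock;
	unsigned int *slots;
	unsigned int max_slots;
};

static int resize_slot_table(struct slot_table *tbl, unsigned int max_reqs)
{
	unsigned int *new = NULL;
	unsigned int i;

	if (max_reqs != tbl->max_slots) {
		new = malloc(max_reqs * sizeof(*new)); /* may block: no lock held */
		if (!new)
			return -1;
	}

	pthread_mutex_lock(&tbl->lock);
	if (new) {
		free(tbl->slots);	/* the kernel frees before locking */
		tbl->slots = new;
		tbl->max_slots = max_reqs;
	}
	for (i = 0; i < tbl->max_slots; i++)
		tbl->slots[i] = 0;	/* reset every sequence number */
	pthread_mutex_unlock(&tbl->lock);
	return 0;
}

int main(void)
{
	struct slot_table tbl = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	assert(resize_slot_table(&tbl, 16) == 0 && tbl.max_slots == 16);
	free(tbl.slots);
	return 0;
}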
...@@ -4798,16 +4820,14 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
	args->fc_attrs.headerpadsz = 0;
	args->fc_attrs.max_rqst_sz = mxrqst_sz;
	args->fc_attrs.max_resp_sz = mxresp_sz;
-	args->fc_attrs.max_resp_sz_cached = mxresp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
-		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
-		args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
-		args->fc_attrs.max_reqs);
+		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.headerpadsz = 0;
...@@ -5016,7 +5036,16 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
				&res, args.sa_cache_this, 1);
}

-void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+static void nfs41_sequence_release(void *data)
+{
+	struct nfs_client *clp = (struct nfs_client *)data;
+
+	if (atomic_read(&clp->cl_count) > 1)
+		nfs4_schedule_state_renewal(clp);
+	nfs_put_client(clp);
+}
+
+static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs_client *clp = (struct nfs_client *)data;
...@@ -5024,6 +5053,8 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)

	if (task->tk_status < 0) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
+		if (atomic_read(&clp->cl_count) == 1)
+			goto out;

		if (_nfs4_async_handle_error(task, NULL, clp, NULL)
				== -EAGAIN) {
...@@ -5032,7 +5063,7 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
-
+out:
	kfree(task->tk_msg.rpc_argp);
	kfree(task->tk_msg.rpc_resp);
...@@ -5057,6 +5088,7 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
+	.rpc_release = nfs41_sequence_release,
};

static int nfs41_proc_async_sequence(struct nfs_client *clp,
...@@ -5069,12 +5101,13 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
		.rpc_cred = cred,
	};

+	if (!atomic_inc_not_zero(&clp->cl_count))
+		return -EIO;
	args = kzalloc(sizeof(*args), GFP_KERNEL);
-	if (!args)
-		return -ENOMEM;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
-	if (!res) {
+	if (!args || !res) {
		kfree(args);
+		nfs_put_client(clp);
		return -ENOMEM;
	}
	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
......
...@@ -36,11 +36,6 @@
 * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
 * context.  There is one renewd per nfs_server.
 *
- * TODO: If the send queue gets backlogged (e.g., if the server goes down),
- * we will keep filling the queue with periodic RENEW requests.  We need a
- * mechanism for ensuring that if renewd successfully sends off a request,
- * then it only wakes up when the request is finished.  Maybe use the
- * child task framework of the RPC layer?
 */

#include <linux/mm.h>
...@@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work)
	struct nfs_client *clp =
		container_of(work, struct nfs_client, cl_renewd.work);
	struct rpc_cred *cred;
-	long lease, timeout;
+	long lease;
	unsigned long last, now;

	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
...@@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work)
	lease = clp->cl_lease_time;
	last = clp->cl_last_renewal;
	now = jiffies;
-	timeout = (2 * lease) / 3 + (long)last - (long)now;
	/* Are we close to a lease timeout? */
	if (time_after(now, last + lease/3)) {
		cred = ops->get_state_renewal_cred_locked(clp);
...@@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work)
			/* Queue an asynchronous RENEW. */
			ops->sched_state_renewal(clp, cred);
			put_rpccred(cred);
+			goto out_exp;
		}
-		timeout = (2 * lease) / 3;
-		spin_lock(&clp->cl_lock);
-	} else
+	} else {
		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
				__func__);
-	if (timeout < 5 * HZ)    /* safeguard */
-		timeout = 5 * HZ;
-	dprintk("%s: requeueing work. Lease period = %ld\n",
-			__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
-	spin_unlock(&clp->cl_lock);
+		spin_unlock(&clp->cl_lock);
+	}
+	nfs4_schedule_state_renewal(clp);
+out_exp:
	nfs_expire_unreferenced_delegations(clp);
out:
	dprintk("%s: done\n", __func__);
......
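The nfs4renewd.c rework deletes the old TODO about backlogged RENEWs along with the inline timeout arithmetic: nfs4_renew_state() now re-arms itself exactly once per run via nfs4_schedule_state_renewal(), so a dead server can no longer make renew requests pile up. The removed lines show the delay policy that now lives in one place; here is a small runnable restatement of it (simplified, seconds instead of jiffies):

#include <stdio.h>

/*
 * Delay policy from the removed lines above: aim for two thirds of the
 * lease measured from the last renewal, with a 5 second safeguard floor.
 * Simplified model; the kernel works in jiffies and holds cl_lock.
 */
static long renewal_delay(long lease, long last, long now)
{
	long timeout = (2 * lease) / 3 + last - now;

	if (timeout < 5)	/* safeguard */
		timeout = 5;
	return timeout;
}

int main(void)
{
	/* 90s lease, last renewed 40s ago: re-run renewd in 20s. */
	printf("next RENEW in %lds\n", renewal_delay(90, 0, 40));
	return 0;
}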
...@@ -1249,26 +1249,65 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) ...@@ -1249,26 +1249,65 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
} }
#ifdef CONFIG_NFS_V4_1 #ifdef CONFIG_NFS_V4_1
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
nfs4_schedule_state_recovery(clp);
}
static void nfs4_reset_all_state(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
clp->cl_boot_time = CURRENT_TIME;
nfs4_state_start_reclaim_nograce(clp);
nfs4_schedule_state_recovery(clp);
}
}
static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
nfs4_state_start_reclaim_reboot(clp);
nfs4_schedule_state_recovery(clp);
}
}
static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
/* Temporary */
nfs4_reset_all_state(clp);
}
static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
/* This will need to handle layouts too */
nfs_expire_all_delegations(clp);
}
static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
nfs_expire_all_delegations(clp);
if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
nfs4_schedule_state_recovery(clp);
}
void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{ {
if (!flags) if (!flags)
return; return;
else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) { else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); nfs41_handle_server_reboot(clp);
nfs4_state_start_reclaim_reboot(clp); else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
nfs4_schedule_state_recovery(clp);
} else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
SEQ4_STATUS_ADMIN_STATE_REVOKED | SEQ4_STATUS_ADMIN_STATE_REVOKED |
SEQ4_STATUS_RECALLABLE_STATE_REVOKED | SEQ4_STATUS_LEASE_MOVED))
SEQ4_STATUS_LEASE_MOVED)) { nfs41_handle_state_revoked(clp);
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs4_state_start_reclaim_nograce(clp); nfs41_handle_recallable_state_revoked(clp);
nfs4_schedule_state_recovery(clp); else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
} else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
SEQ4_STATUS_BACKCHANNEL_FAULT | SEQ4_STATUS_BACKCHANNEL_FAULT |
SEQ4_STATUS_CB_PATH_DOWN_SESSION)) SEQ4_STATUS_CB_PATH_DOWN_SESSION))
nfs_expire_all_delegations(clp); nfs41_handle_cb_path_down(clp);
} }
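The rewritten nfs41_handle_sequence_flag_errors() above is now a strict priority dispatch: the SEQ4_STATUS groups are tested from most to least severe and only the first match is acted on. A compact model of that shape follows; the flag values below are invented for the example, while the real SEQ4_STATUS_* constants live in the NFSv4 headers.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the SEQ4_STATUS_* groups, most severe first. */
#define SEQ_RESTART_RECLAIM	0x01	/* server rebooted */
#define SEQ_STATE_REVOKED	0x02	/* lease/state revoked */
#define SEQ_RECALLABLE_REVOKED	0x04	/* delegations revoked */
#define SEQ_CB_PATH_DOWN	0x08	/* callback channel trouble */

static const char *dispatch(uint32_t flags)
{
	if (!flags)
		return "nothing to do";
	if (flags & SEQ_RESTART_RECLAIM)
		return "reclaim after server reboot";
	if (flags & SEQ_STATE_REVOKED)
		return "reset all state";
	if (flags & SEQ_RECALLABLE_REVOKED)
		return "expire delegations";
	if (flags & SEQ_CB_PATH_DOWN)
		return "expire delegations and reset the session";
	return "unhandled flag";
}

int main(void)
{
	/* Revoked state outranks a callback-path problem. */
	printf("%s\n", dispatch(SEQ_STATE_REVOKED | SEQ_CB_PATH_DOWN));
	return 0;
}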
static int nfs4_reset_session(struct nfs_client *clp) static int nfs4_reset_session(struct nfs_client *clp)
...@@ -1285,23 +1324,52 @@ static int nfs4_reset_session(struct nfs_client *clp) ...@@ -1285,23 +1324,52 @@ static int nfs4_reset_session(struct nfs_client *clp)
memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
status = nfs4_proc_create_session(clp); status = nfs4_proc_create_session(clp);
if (status) if (status) {
status = nfs4_recovery_handle_error(clp, status); status = nfs4_recovery_handle_error(clp, status);
goto out;
}
/* create_session negotiated new slot table */
clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
out: /* Let the state manager reestablish state */
/* if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
* Let the state manager reestablish state
*/
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
status == 0)
nfs41_setup_state_renewal(clp); nfs41_setup_state_renewal(clp);
out:
return status; return status;
} }
static int nfs4_recall_slot(struct nfs_client *clp)
{
struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
struct nfs4_slot *new, *old;
int i;
nfs4_begin_drain_session(clp);
new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
GFP_KERNEL);
if (!new)
return -ENOMEM;
spin_lock(&fc_tbl->slot_tbl_lock);
for (i = 0; i < fc_tbl->target_max_slots; i++)
new[i].seq_nr = fc_tbl->slots[i].seq_nr;
old = fc_tbl->slots;
fc_tbl->slots = new;
fc_tbl->max_slots = fc_tbl->target_max_slots;
fc_tbl->target_max_slots = 0;
fc_attrs->max_reqs = fc_tbl->max_slots;
spin_unlock(&fc_tbl->slot_tbl_lock);
kfree(old);
nfs4_end_drain_session(clp);
return 0;
}
#else /* CONFIG_NFS_V4_1 */ #else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; } static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */ #endif /* CONFIG_NFS_V4_1 */
/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors /* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
...@@ -1314,6 +1382,7 @@ static void nfs4_set_lease_expired(struct nfs_client *clp, int status) ...@@ -1314,6 +1382,7 @@ static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
case -NFS4ERR_DELAY: case -NFS4ERR_DELAY:
case -NFS4ERR_CLID_INUSE: case -NFS4ERR_CLID_INUSE:
case -EAGAIN: case -EAGAIN:
case -EKEYEXPIRED:
break; break;
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
...@@ -1397,6 +1466,15 @@ static void nfs4_state_manager(struct nfs_client *clp) ...@@ -1397,6 +1466,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs_client_return_marked_delegations(clp); nfs_client_return_marked_delegations(clp);
continue; continue;
} }
/* Recall session slots */
if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
&& nfs4_has_session(clp)) {
status = nfs4_recall_slot(clp);
if (status < 0)
goto out_error;
continue;
}
nfs4_clear_state_manager_bit(clp); nfs4_clear_state_manager_bit(clp);
/* Did we race with an attempt to give us more work? */ /* Did we race with an attempt to give us more work? */
......
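nfs4_recall_slot() above uses the standard resize-under-spinlock recipe: allocate the replacement slot array outside the lock, copy the surviving per-slot sequence numbers and swap the pointers while holding slot_tbl_lock, then free the old array after unlocking. Below is a standalone C model of the same recipe, with a pthread mutex standing in for the spinlock; all names are hypothetical.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct slot {
	uint32_t seq_nr;
};

struct slot_table {
	pthread_mutex_t lock;
	struct slot *slots;
	unsigned int max_slots;
	unsigned int target_max_slots;	/* set by CB_RECALL_SLOT */
};

static int resize_slot_table(struct slot_table *tbl)
{
	unsigned int i, target = tbl->target_max_slots;
	struct slot *fresh, *old;

	/* Allocate outside the lock: allocation may block or fail. */
	fresh = calloc(target, sizeof(*fresh));
	if (!fresh)
		return -1;

	pthread_mutex_lock(&tbl->lock);
	/* Keep sequence numbers for slots that survive the resize.
	 * (The kernel version assumes a shrink; this guards both ways.) */
	for (i = 0; i < target && i < tbl->max_slots; i++)
		fresh[i].seq_nr = tbl->slots[i].seq_nr;
	old = tbl->slots;
	tbl->slots = fresh;
	tbl->max_slots = target;
	tbl->target_max_slots = 0;
	pthread_mutex_unlock(&tbl->lock);

	free(old);			/* free outside the lock too */
	return 0;
}

int main(void)
{
	struct slot_table tbl = { .lock = PTHREAD_MUTEX_INITIALIZER };

	tbl.max_slots = 8;
	tbl.target_max_slots = 4;	/* server recalled half the slots */
	tbl.slots = calloc(tbl.max_slots, sizeof(*tbl.slots));
	if (!tbl.slots || resize_slot_table(&tbl) < 0)
		return 1;
	printf("resized to %u slots\n", tbl.max_slots);
	free(tbl.slots);
	return 0;
}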
...@@ -1578,6 +1578,14 @@ static void encode_create_session(struct xdr_stream *xdr, ...@@ -1578,6 +1578,14 @@ static void encode_create_session(struct xdr_stream *xdr,
char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
uint32_t len; uint32_t len;
struct nfs_client *clp = args->client; struct nfs_client *clp = args->client;
u32 max_resp_sz_cached;
/*
* Assumes OPEN is the biggest non-idempotent compound.
* The extra 2 XDR words hold the RPC verifier.
*/
max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
len = scnprintf(machine_name, sizeof(machine_name), "%s", len = scnprintf(machine_name, sizeof(machine_name), "%s",
clp->cl_ipaddr); clp->cl_ipaddr);
...@@ -1592,7 +1600,7 @@ static void encode_create_session(struct xdr_stream *xdr, ...@@ -1592,7 +1600,7 @@ static void encode_create_session(struct xdr_stream *xdr,
*p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
*p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
*p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */ *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */
*p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */
*p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */
*p++ = cpu_to_be32(0); /* rdmachannel_attrs */ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
......
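The encode_create_session() hunk stops trusting the caller-supplied fc_attrs.max_resp_sz_cached and derives the fore-channel ca_maxresponsesize_cached from the largest non-idempotent compound the client issues (OPEN), plus the RPC reply header, verifier room, and two XDR words for the verifier, all scaled to bytes by XDR_UNIT. A worked restatement of the arithmetic follows; the three size constants are placeholders, not the kernel's real values.

#include <stdio.h>

#define XDR_UNIT		4	/* bytes per XDR word */
/* Placeholder sizes, all in XDR words; the real constants come from
 * the kernel's nfs4xdr.c and RPC headers. */
#define NFS4_DEC_OPEN_SZ	280	/* hypothetical decoded OPEN reply */
#define RPC_REPHDRSIZE		4	/* hypothetical RPC reply header */
#define RPC_MAX_AUTH_SIZE	100	/* hypothetical verifier room */

int main(void)
{
	unsigned int max_resp_sz_cached =
		(NFS4_DEC_OPEN_SZ + RPC_REPHDRSIZE + RPC_MAX_AUTH_SIZE + 2)
		* XDR_UNIT;		/* "+ 2": two words of RPC verifier */

	printf("ca_maxresponsesize_cached = %u bytes\n", max_resp_sz_cached);
	return 0;
}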
...@@ -46,6 +46,39 @@ ...@@ -46,6 +46,39 @@
#define NFSDBG_FACILITY NFSDBG_PROC #define NFSDBG_FACILITY NFSDBG_PROC
/*
* wrapper to handle the -EKEYEXPIRED error code. This should generally
* only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't
* support the NFSERR_JUKEBOX error code, but we handle this situation in the
* same way that we handle that error with NFSv3.
*/
static int
nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
int res;
do {
res = rpc_call_sync(clnt, msg, flags);
if (res != -EKEYEXPIRED)
break;
schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
} while (!fatal_signal_pending(current));
return res;
}
#define rpc_call_sync(clnt, msg, flags) nfs_rpc_wrapper(clnt, msg, flags)
static int
nfs_async_handle_expired_key(struct rpc_task *task)
{
if (task->tk_status != -EKEYEXPIRED)
return 0;
task->tk_status = 0;
rpc_restart_call(task);
rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
return 1;
}
/* /*
* Bare-bones access to getattr: this is for nfs_read_super. * Bare-bones access to getattr: this is for nfs_read_super.
*/ */
...@@ -307,6 +340,8 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) ...@@ -307,6 +340,8 @@ nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{ {
if (nfs_async_handle_expired_key(task))
return 0;
nfs_mark_for_revalidate(dir); nfs_mark_for_revalidate(dir);
return 1; return 1;
} }
...@@ -560,6 +595,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, ...@@ -560,6 +595,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
{ {
if (nfs_async_handle_expired_key(task))
return -EAGAIN;
nfs_invalidate_atime(data->inode); nfs_invalidate_atime(data->inode);
if (task->tk_status >= 0) { if (task->tk_status >= 0) {
nfs_refresh_inode(data->inode, data->res.fattr); nfs_refresh_inode(data->inode, data->res.fattr);
...@@ -579,6 +617,9 @@ static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message * ...@@ -579,6 +617,9 @@ static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message *
static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
{ {
if (nfs_async_handle_expired_key(task))
return -EAGAIN;
if (task->tk_status >= 0) if (task->tk_status >= 0)
nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
return 0; return 0;
......
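The proc.c hunks give NFSv2 the same treatment an NFSERR_JUKEBOX reply gets in v3: the synchronous wrapper (which shadows rpc_call_sync through the #define) sleeps and retries on -EKEYEXPIRED until the key is usable or a fatal signal arrives, while nfs_async_handle_expired_key() restarts an async task after a delay. Here is a runnable userspace model of the synchronous loop; the fake RPC and the constants are stand-ins.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#ifndef EKEYEXPIRED
#define EKEYEXPIRED	127	/* Linux errno value */
#endif
#ifndef ERESTARTSYS
#define ERESTARTSYS	512	/* kernel-internal; never reaches userspace */
#endif
#define RETRY_SECONDS	1	/* stand-in for NFS_JUKEBOX_RETRY_TIME */

static volatile sig_atomic_t fatal_signal;

static void on_term(int sig)
{
	(void)sig;
	fatal_signal = 1;
}

/* Hypothetical RPC: fails twice with an expired key, then succeeds. */
static int fake_rpc_call(void)
{
	static int attempts;

	return ++attempts < 3 ? -EKEYEXPIRED : 0;
}

static int rpc_wrapper(void)
{
	int res;

	do {
		res = fake_rpc_call();
		if (res != -EKEYEXPIRED)
			break;		/* success or some other error */
		sleep(RETRY_SECONDS);	/* like schedule_timeout_killable() */
		res = -ERESTARTSYS;	/* reported if a signal ends the loop */
	} while (!fatal_signal);
	return res;
}

int main(void)
{
	signal(SIGTERM, on_term);
	printf("result: %d\n", rpc_wrapper());	/* prints 0 after two retries */
	return 0;
}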
...@@ -50,7 +50,7 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd) ...@@ -50,7 +50,7 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
struct page *page; struct page *page;
void *err; void *err;
err = ERR_PTR(nfs_revalidate_mapping_nolock(inode, inode->i_mapping)); err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
if (err) if (err)
goto read_failed; goto read_failed;
page = read_cache_page(&inode->i_data, 0, page = read_cache_page(&inode->i_data, 0,
......
...@@ -438,6 +438,7 @@ nfs_mark_request_commit(struct nfs_page *req) ...@@ -438,6 +438,7 @@ nfs_mark_request_commit(struct nfs_page *req)
radix_tree_tag_set(&nfsi->nfs_page_tree, radix_tree_tag_set(&nfsi->nfs_page_tree,
req->wb_index, req->wb_index,
NFS_PAGE_TAG_COMMIT); NFS_PAGE_TAG_COMMIT);
nfsi->ncommit++;
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
...@@ -501,57 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req) ...@@ -501,57 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
} }
#endif #endif
/*
* Wait for a request to complete.
*
* Interruptible by fatal signals only.
*/
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *req;
pgoff_t idx_end, next;
unsigned int res = 0;
int error;
if (npages == 0)
idx_end = ~0;
else
idx_end = idx_start + npages - 1;
next = idx_start;
while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
if (req->wb_index > idx_end)
break;
next = req->wb_index + 1;
BUG_ON(!NFS_WBACK_BUSY(req));
kref_get(&req->wb_kref);
spin_unlock(&inode->i_lock);
error = nfs_wait_on_request(req);
nfs_release_request(req);
spin_lock(&inode->i_lock);
if (error < 0)
return error;
res++;
}
return res;
}
static void nfs_cancel_commit_list(struct list_head *head)
{
struct nfs_page *req;
while(!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_clear_request_commit(req);
nfs_inode_remove_request(req);
nfs_unlock_request(req);
}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int static int
nfs_need_commit(struct nfs_inode *nfsi) nfs_need_commit(struct nfs_inode *nfsi)
...@@ -573,11 +523,17 @@ static int ...@@ -573,11 +523,17 @@ static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{ {
struct nfs_inode *nfsi = NFS_I(inode); struct nfs_inode *nfsi = NFS_I(inode);
int ret;
if (!nfs_need_commit(nfsi)) if (!nfs_need_commit(nfsi))
return 0; return 0;
return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
if (ret > 0)
nfsi->ncommit -= ret;
if (nfs_need_commit(NFS_I(inode)))
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
} }
#else #else
static inline int nfs_need_commit(struct nfs_inode *nfsi) static inline int nfs_need_commit(struct nfs_inode *nfsi)
...@@ -642,9 +598,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, ...@@ -642,9 +598,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
} }
if (nfs_clear_request_commit(req)) if (nfs_clear_request_commit(req) &&
radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree, radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
req->wb_index, NFS_PAGE_TAG_COMMIT); req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
NFS_I(inode)->ncommit--;
/* Okay, the request matches. Update the region */ /* Okay, the request matches. Update the region */
if (offset < req->wb_offset) { if (offset < req->wb_offset) {
...@@ -1391,7 +1348,7 @@ static const struct rpc_call_ops nfs_commit_ops = { ...@@ -1391,7 +1348,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
.rpc_release = nfs_commit_release, .rpc_release = nfs_commit_release,
}; };
int nfs_commit_inode(struct inode *inode, int how) static int nfs_commit_inode(struct inode *inode, int how)
{ {
LIST_HEAD(head); LIST_HEAD(head);
int res; int res;
...@@ -1406,92 +1363,51 @@ int nfs_commit_inode(struct inode *inode, int how) ...@@ -1406,92 +1363,51 @@ int nfs_commit_inode(struct inode *inode, int how)
} }
return res; return res;
} }
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
return 0;
}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{ {
struct inode *inode = mapping->host; struct nfs_inode *nfsi = NFS_I(inode);
pgoff_t idx_start, idx_end; int flags = FLUSH_SYNC;
unsigned int npages = 0; int ret = 0;
LIST_HEAD(head);
int nocommit = how & FLUSH_NOCOMMIT; /* Don't commit yet if this is a non-blocking flush and there are
long pages, ret; * lots of outstanding writes for this mapping.
*/
/* FIXME */ if (wbc->sync_mode == WB_SYNC_NONE &&
if (wbc->range_cyclic) nfsi->ncommit <= (nfsi->npages >> 1))
idx_start = 0; goto out_mark_dirty;
else {
idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; if (wbc->nonblocking || wbc->for_background)
idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; flags = 0;
if (idx_end > idx_start) { ret = nfs_commit_inode(inode, flags);
pgoff_t l_npages = 1 + idx_end - idx_start; if (ret >= 0) {
npages = l_npages; if (wbc->sync_mode == WB_SYNC_NONE) {
if (sizeof(npages) != sizeof(l_npages) && if (ret < wbc->nr_to_write)
(pgoff_t)npages != l_npages) wbc->nr_to_write -= ret;
npages = 0; else
wbc->nr_to_write = 0;
} }
return 0;
} }
how &= ~FLUSH_NOCOMMIT; out_mark_dirty:
spin_lock(&inode->i_lock); __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
do {
ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
if (ret != 0)
continue;
if (nocommit)
break;
pages = nfs_scan_commit(inode, &head, idx_start, npages);
if (pages == 0)
break;
if (how & FLUSH_INVALIDATE) {
spin_unlock(&inode->i_lock);
nfs_cancel_commit_list(&head);
ret = pages;
spin_lock(&inode->i_lock);
continue;
}
pages += nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&inode->i_lock);
ret = nfs_commit_list(inode, &head, how);
spin_lock(&inode->i_lock);
} while (ret >= 0);
spin_unlock(&inode->i_lock);
return ret; return ret;
} }
#else
static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) static int nfs_commit_inode(struct inode *inode, int how)
{ {
int ret;
ret = nfs_writepages(mapping, wbc);
if (ret < 0)
goto out;
ret = nfs_sync_mapping_wait(mapping, wbc, how);
if (ret < 0)
goto out;
return 0; return 0;
out:
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return ret;
} }
/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
static int nfs_write_mapping(struct address_space *mapping, int how)
{ {
struct writeback_control wbc = { return 0;
.bdi = mapping->backing_dev_info, }
.sync_mode = WB_SYNC_ALL, #endif
.nr_to_write = LONG_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
};
return __nfs_write_mapping(mapping, &wbc, how); int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
return nfs_commit_unstable_pages(inode, wbc);
} }
/* /*
...@@ -1499,37 +1415,26 @@ static int nfs_write_mapping(struct address_space *mapping, int how) ...@@ -1499,37 +1415,26 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
*/ */
int nfs_wb_all(struct inode *inode) int nfs_wb_all(struct inode *inode)
{ {
return nfs_write_mapping(inode->i_mapping, 0); struct writeback_control wbc = {
} .sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_start = 0,
.range_end = LLONG_MAX,
};
int nfs_wb_nocommit(struct inode *inode) return sync_inode(inode, &wbc);
{
return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
} }
int nfs_wb_page_cancel(struct inode *inode, struct page *page) int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{ {
struct nfs_page *req; struct nfs_page *req;
loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.bdi = page->mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_start = range_start,
.range_end = range_end,
};
int ret = 0; int ret = 0;
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
for (;;) { for (;;) {
req = nfs_page_find_request(page); req = nfs_page_find_request(page);
if (req == NULL) if (req == NULL)
goto out;
if (test_bit(PG_CLEAN, &req->wb_flags)) {
nfs_release_request(req);
break; break;
}
if (nfs_lock_request_dontget(req)) { if (nfs_lock_request_dontget(req)) {
nfs_inode_remove_request(req); nfs_inode_remove_request(req);
/* /*
...@@ -1543,54 +1448,54 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) ...@@ -1543,54 +1448,54 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
ret = nfs_wait_on_request(req); ret = nfs_wait_on_request(req);
nfs_release_request(req); nfs_release_request(req);
if (ret < 0) if (ret < 0)
goto out; break;
} }
if (!PagePrivate(page))
return 0;
ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
return ret; return ret;
} }
static int nfs_wb_page_priority(struct inode *inode, struct page *page, /*
int how) * Write back all requests on one page - we do this before reading it.
*/
int nfs_wb_page(struct inode *inode, struct page *page)
{ {
loff_t range_start = page_offset(page); loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = { struct writeback_control wbc = {
.bdi = page->mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL, .sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX, .nr_to_write = 0,
.range_start = range_start, .range_start = range_start,
.range_end = range_end, .range_end = range_end,
}; };
struct nfs_page *req;
int need_commit;
int ret; int ret;
do { while(PagePrivate(page)) {
if (clear_page_dirty_for_io(page)) { if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc); ret = nfs_writepage_locked(page, &wbc);
if (ret < 0) if (ret < 0)
goto out_error; goto out_error;
} else if (!PagePrivate(page)) }
req = nfs_find_and_lock_request(page);
if (!req)
break; break;
ret = nfs_sync_mapping_wait(page->mapping, &wbc, how); if (IS_ERR(req)) {
if (ret < 0) ret = PTR_ERR(req);
goto out_error; goto out_error;
} while (PagePrivate(page)); }
need_commit = test_bit(PG_CLEAN, &req->wb_flags);
nfs_clear_page_tag_locked(req);
if (need_commit) {
ret = nfs_commit_inode(inode, FLUSH_SYNC);
if (ret < 0)
goto out_error;
}
}
return 0; return 0;
out_error: out_error:
__mark_inode_dirty(inode, I_DIRTY_PAGES);
return ret; return ret;
} }
/*
* Write back all requests on one page - we do this before reading it.
*/
int nfs_wb_page(struct inode *inode, struct page* page)
{
return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage, int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page) struct page *page)
......
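The write.c rewrite replaces the old FLUSH_NOCOMMIT/FLUSH_INVALIDATE machinery with a counter-driven heuristic: nfs_mark_request_commit() and nfs_scan_commit() keep ncommit in step, and nfs_commit_unstable_pages() skips the COMMIT on a non-blocking flush while no more than half of the inode's pages are unstable, simply re-marking the inode I_DIRTY_DATASYNC for a later pass. A toy model of that decision follows; the two field names mirror the patch, everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>

struct inode_counters {
	unsigned long npages;	/* pages with requests outstanding */
	unsigned long ncommit;	/* of those, unstable (awaiting COMMIT) */
};

/* Non-blocking flushes hold off until unstable pages dominate. */
static bool should_commit_now(const struct inode_counters *c, bool sync)
{
	if (!sync && c->ncommit <= (c->npages >> 1))
		return false;	/* too few unstable pages to bother yet */
	return true;
}

int main(void)
{
	struct inode_counters c = { .npages = 100, .ncommit = 30 };

	printf("background flush commits: %d\n", should_commit_now(&c, false));
	printf("sync flush commits:       %d\n", should_commit_now(&c, true));
	return 0;
}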
...@@ -33,9 +33,6 @@ ...@@ -33,9 +33,6 @@
#define FLUSH_STABLE 4 /* commit to stable storage */ #define FLUSH_STABLE 4 /* commit to stable storage */
#define FLUSH_LOWPRI 8 /* low priority background flush */ #define FLUSH_LOWPRI 8 /* low priority background flush */
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */ #define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
#define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */
#define FLUSH_INVALIDATE 64 /* Invalidate the page cache */
#define FLUSH_NOWRITEPAGE 128 /* Don't call writepage() */
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -166,6 +163,7 @@ struct nfs_inode { ...@@ -166,6 +163,7 @@ struct nfs_inode {
struct radix_tree_root nfs_page_tree; struct radix_tree_root nfs_page_tree;
unsigned long npages; unsigned long npages;
unsigned long ncommit;
/* Open contexts for shared mmap writes */ /* Open contexts for shared mmap writes */
struct list_head open_files; struct list_head open_files;
...@@ -349,7 +347,6 @@ extern int nfs_attribute_timeout(struct inode *inode); ...@@ -349,7 +347,6 @@ extern int nfs_attribute_timeout(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *); extern int nfs_setattr(struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
...@@ -477,21 +474,12 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); ...@@ -477,21 +474,12 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
* Try to write back everything synchronously (but check the * Try to write back everything synchronously (but check the
* return value!) * return value!)
*/ */
extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_nocommit(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page); extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commitdata_alloc(void); extern struct nfs_write_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_write_data *wdata); extern void nfs_commit_free(struct nfs_write_data *wdata);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
{
return 0;
}
#endif #endif
static inline int static inline int
......
...@@ -193,6 +193,8 @@ struct nfs4_slot_table { ...@@ -193,6 +193,8 @@ struct nfs4_slot_table {
int max_slots; /* # slots in table */ int max_slots; /* # slots in table */
int highest_used_slotid; /* sent to server on each SEQ. int highest_used_slotid; /* sent to server on each SEQ.
* op for dynamic resizing */ * op for dynamic resizing */
int target_max_slots; /* Set by CB_RECALL_SLOT as
* the new max_slots */
}; };
static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp) static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
......
...@@ -38,12 +38,27 @@ int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); ...@@ -38,12 +38,27 @@ int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs);
void bc_release_request(struct rpc_task *); void bc_release_request(struct rpc_task *);
int bc_send(struct rpc_rqst *req); int bc_send(struct rpc_rqst *req);
/*
* Determine if a shared backchannel is in use
*/
static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
{
if (rqstp->rq_server->bc_xprt)
return 1;
return 0;
}
#else /* CONFIG_NFS_V4_1 */ #else /* CONFIG_NFS_V4_1 */
static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
unsigned int min_reqs) unsigned int min_reqs)
{ {
return 0; return 0;
} }
static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
{
return 0;
}
#endif /* CONFIG_NFS_V4_1 */ #endif /* CONFIG_NFS_V4_1 */
#endif /* _LINUX_SUNRPC_BC_XPRT_H */ #endif /* _LINUX_SUNRPC_BC_XPRT_H */
...@@ -71,8 +71,9 @@ static size_t rpc_ntop6(const struct sockaddr *sap, ...@@ -71,8 +71,9 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
if (unlikely(len == 0)) if (unlikely(len == 0))
return len; return len;
if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) return len;
if (sin6->sin6_scope_id == 0)
return len; return len;
rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
...@@ -165,8 +166,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen, ...@@ -165,8 +166,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen,
if (*delim != IPV6_SCOPE_DELIMITER) if (*delim != IPV6_SCOPE_DELIMITER)
return 0; return 0;
if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
return 0; return 0;
len = (buf + buflen) - delim - 1; len = (buf + buflen) - delim - 1;
......
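After the addr.c change, a scope id is printed only for link-local addresses and only when it is non-zero; the site-local special case is gone (matching the xprtsock.c hunk below, where a link-local address that reaches connect() without a scope now surfaces as -EINVAL). A small model of the printing side using the standard socket API; the helper name is made up.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

#define IPV6_SCOPE_DELIMITER	'%'

/* Append "%<scope>" only for link-local addresses with a real scope id. */
static void ntop6_with_scope(const struct sockaddr_in6 *sin6,
			     char *buf, size_t buflen)
{
	char scopebuf[16];

	if (!inet_ntop(AF_INET6, &sin6->sin6_addr, buf, buflen))
		return;
	if (!IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
		return;			/* global address: no scope */
	if (sin6->sin6_scope_id == 0)
		return;			/* nothing meaningful to append */
	snprintf(scopebuf, sizeof(scopebuf), "%c%u",
		 IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
	strncat(buf, scopebuf, buflen - strlen(buf) - 1);
}

int main(void)
{
	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6,
				     .sin6_scope_id = 2 };
	char buf[64];

	inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
	ntop6_with_scope(&sin6, buf, sizeof(buf));
	printf("%s\n", buf);		/* fe80::1%2 */
	return 0;
}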
...@@ -206,8 +206,14 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct ...@@ -206,8 +206,14 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
ctx->gc_win = window_size; ctx->gc_win = window_size;
/* gssd signals an error by passing ctx->gc_win = 0: */ /* gssd signals an error by passing ctx->gc_win = 0: */
if (ctx->gc_win == 0) { if (ctx->gc_win == 0) {
/* in which case, p points to an error code which we ignore */ /*
p = ERR_PTR(-EACCES); * in which case, p points to an error code. Anything other
* than -EKEYEXPIRED gets converted to -EACCES.
*/
p = simple_get_bytes(p, end, &ret, sizeof(ret));
if (!IS_ERR(p))
p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
ERR_PTR(-EACCES);
goto err; goto err;
} }
/* copy the opaque wire context */ /* copy the opaque wire context */
...@@ -646,6 +652,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) ...@@ -646,6 +652,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
err = PTR_ERR(p); err = PTR_ERR(p);
switch (err) { switch (err) {
case -EACCES: case -EACCES:
case -EKEYEXPIRED:
gss_msg->msg.errno = err; gss_msg->msg.errno = err;
err = mlen; err = mlen;
break; break;
......
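The auth_gss.c change makes the zero-window error path actually read the errno gssd sent instead of hard-coding -EACCES, so an expired TGT (-EKEYEXPIRED) survives the upcall pipe and reaches the retry logic added elsewhere in this series; gss_pipe_downcall() accepts the new code alongside -EACCES. Below is a bounded-buffer read in the spirit of simple_get_bytes(); this sketch is a model, not the kernel helper.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#ifndef EKEYEXPIRED
#define EKEYEXPIRED	127	/* Linux errno value */
#endif

/* Copy a fixed-size value out of a bounded buffer, or report truncation. */
static const void *get_bytes(const void *p, const void *end,
			     void *res, size_t len)
{
	const char *q = (const char *)p + len;

	if (q > (const char *)end)
		return NULL;		/* truncated downcall */
	memcpy(res, p, len);
	return q;
}

int main(void)
{
	int downcall[1] = { -EKEYEXPIRED };	/* what gssd might send */
	int err = 0;

	if (get_bytes(downcall, downcall + 1, &err, sizeof(err))) {
		/* Anything but an expired key collapses to EACCES. */
		if (err != -EKEYEXPIRED)
			err = -EACCES;
		printf("propagated errno: %d\n", err);
	}
	return 0;
}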
...@@ -506,6 +506,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) ...@@ -506,6 +506,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{ {
unsigned int pages, arghi; unsigned int pages, arghi;
/* bc_xprt uses fore channel allocated buffers */
if (svc_is_backchannel(rqstp))
return 1;
pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
* We assume one is at most one page * We assume one is at most one page
*/ */
......
...@@ -1912,6 +1912,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt, ...@@ -1912,6 +1912,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
case -EALREADY: case -EALREADY:
xprt_clear_connecting(xprt); xprt_clear_connecting(xprt);
return; return;
case -EINVAL:
/* Happens, for instance, if the user specified a link
* local IPv6 address without a scope-id.
*/
goto out;
} }
out_eagain: out_eagain:
status = -EAGAIN; status = -EAGAIN;
...@@ -2100,7 +2105,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) ...@@ -2100,7 +2105,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
* we allocate pages instead doing a kmalloc like rpc_malloc is because we want * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
* to use the server side send routines. * to use the server side send routines.
*/ */
void *bc_malloc(struct rpc_task *task, size_t size) static void *bc_malloc(struct rpc_task *task, size_t size)
{ {
struct page *page; struct page *page;
struct rpc_buffer *buf; struct rpc_buffer *buf;
...@@ -2120,7 +2125,7 @@ void *bc_malloc(struct rpc_task *task, size_t size) ...@@ -2120,7 +2125,7 @@ void *bc_malloc(struct rpc_task *task, size_t size)
/* /*
* Free the space allocated in the bc_alloc routine * Free the space allocated in the bc_alloc routine
*/ */
void bc_free(void *buffer) static void bc_free(void *buffer)
{ {
struct rpc_buffer *buf; struct rpc_buffer *buf;
......