Commit 6504d82f authored by Linus Torvalds

Merge tag 'nfs-for-5.20-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client bugfixes from Trond Myklebust:

 - Fix SUNRPC call completion races with call_decode() that trigger a
   WARN_ON()

 - NFSv4.0 cannot support open-by-filehandle and NFS re-export

 - Revert "SUNRPC: Remove unreachable error condition" to allow handling
   of error conditions

 - Update suid/sgid mode bits after ALLOCATE and DEALLOCATE

* tag 'nfs-for-5.20-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  Revert "SUNRPC: Remove unreachable error condition"
  NFSv4.2: Update mode bits after ALLOCATE and DEALLOCATE
  NFSv4: Turn off open-by-filehandle and NFS re-export for NFSv4.0
  SUNRPC: Fix call completion races with call_decode()
parents 62d1cea7 13bd9014
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -606,6 +606,31 @@ static inline gfp_t nfs_io_gfp_mask(void)
 	return GFP_KERNEL;
 }
 
+/*
+ * Special version of should_remove_suid() that ignores capabilities.
+ */
+static inline int nfs_should_remove_suid(const struct inode *inode)
+{
+	umode_t mode = inode->i_mode;
+	int kill = 0;
+
+	/* suid always must be killed */
+	if (unlikely(mode & S_ISUID))
+		kill = ATTR_KILL_SUID;
+
+	/*
+	 * sgid without any exec bits is just a mandatory locking mark; leave
+	 * it alone. If some exec bits are set, it's a real sgid; kill it.
+	 */
+	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+		kill |= ATTR_KILL_SGID;
+
+	if (unlikely(kill && S_ISREG(mode)))
+		return kill;
+
+	return 0;
+}
+
 /* unlink.c */
 extern struct rpc_task *
 nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
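The helper above decides, from the mode bits alone, whether a write-like operation must drop setuid/setgid. For illustration only, here is a minimal userspace sketch of the same decision logic; KILL_SUID/KILL_SGID are local stand-ins for the kernel's ATTR_KILL_* flags, and the test modes in main() are arbitrary examples:

/* Userspace sketch of the suid/sgid "kill" decision shown above. */
#include <stdio.h>
#include <sys/stat.h>

#define KILL_SUID	1	/* stand-in for ATTR_KILL_SUID */
#define KILL_SGID	2	/* stand-in for ATTR_KILL_SGID */

static int should_remove_suid(mode_t mode)
{
	int kill = 0;

	/* suid always must be killed */
	if (mode & S_ISUID)
		kill = KILL_SUID;

	/* sgid without group-exec is only a mandatory-locking mark */
	if ((mode & S_ISGID) && (mode & S_IXGRP))
		kill |= KILL_SGID;

	/* only regular files are affected */
	if (kill && S_ISREG(mode))
		return kill;
	return 0;
}

int main(void)
{
	mode_t m1 = S_IFREG | 04755;	/* setuid regular file */
	mode_t m2 = S_IFREG | 02644;	/* sgid without group exec: locking mark */
	mode_t m3 = S_IFREG | 06775;	/* setuid + setgid with exec bits */

	printf("%04o -> %d\n", (unsigned)(m1 & 07777), should_remove_suid(m1));
	printf("%04o -> %d\n", (unsigned)(m2 & 07777), should_remove_suid(m2));
	printf("%04o -> %d\n", (unsigned)(m3 & 07777), should_remove_suid(m3));
	return 0;
}

Running the sketch prints 1 for the plain setuid file, 0 for the sgid-without-group-exec (mandatory-locking) case, and 3 when both bits must be dropped.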
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -78,10 +78,15 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
 	status = nfs4_call_sync(server->client, server, msg,
 				&args.seq_args, &res.seq_res, 0);
-	if (status == 0)
+	if (status == 0) {
+		if (nfs_should_remove_suid(inode)) {
+			spin_lock(&inode->i_lock);
+			nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+			spin_unlock(&inode->i_lock);
+		}
 		status = nfs_post_op_update_inode_force_wcc(inode,
 						res.falloc_fattr);
+	}
 
 	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
 		trace_nfs4_fallocate(inode, &args, status);
 	else
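With this change, a successful ALLOCATE or DEALLOCATE on a file whose mode requires dropping setuid/setgid also invalidates the cached mode, so the client re-fetches it and observes the server-side clearing. A rough reproducer sketch follows; the NFS mount point /mnt/nfs and the test file name are assumptions, and on a pre-fix client the suid bit could still appear set after fallocate():

/* Rough reproducer: fallocate() a setuid file on NFS and check whether
 * the suid bit is still visible afterwards. Adjust the path for your setup. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/nfs/testfile";	/* assumed NFS mount */
	struct stat st;
	int fd;

	fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fchmod(fd, 04755) < 0)	/* set suid + exec bits */
		perror("fchmod");

	/* Allocate 1 MiB; on an NFSv4.2 mount this becomes an ALLOCATE call. */
	if (fallocate(fd, 0, 0, 1024 * 1024) < 0)
		perror("fallocate");

	if (fstat(fd, &st) == 0)
		printf("mode after fallocate: %04o (suid %s)\n",
		       (unsigned)(st.st_mode & 07777),
		       (st.st_mode & S_ISUID) ? "still set" : "cleared");
	close(fd);
	return 0;
}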
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1051,22 +1051,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
 	if (ctx->bsize)
 		sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
 
-	if (server->nfs_client->rpc_ops->version != 2) {
-		/* The VFS shouldn't apply the umask to mode bits. We will do
-		 * so ourselves when necessary.
+	switch (server->nfs_client->rpc_ops->version) {
+	case 2:
+		sb->s_time_gran = 1000;
+		sb->s_time_min = 0;
+		sb->s_time_max = U32_MAX;
+		break;
+	case 3:
+		/*
+		 * The VFS shouldn't apply the umask to mode bits.
+		 * We will do so ourselves when necessary.
 		 */
 		sb->s_flags |= SB_POSIXACL;
 		sb->s_time_gran = 1;
-		sb->s_export_op = &nfs_export_ops;
-	} else
-		sb->s_time_gran = 1000;
-
-	if (server->nfs_client->rpc_ops->version != 4) {
 		sb->s_time_min = 0;
 		sb->s_time_max = U32_MAX;
-	} else {
+		sb->s_export_op = &nfs_export_ops;
+		break;
+	case 4:
+		sb->s_flags |= SB_POSIXACL;
+		sb->s_time_gran = 1;
 		sb->s_time_min = S64_MIN;
 		sb->s_time_max = S64_MAX;
+		if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+			sb->s_export_op = &nfs_export_ops;
+		break;
 	}
 
 	sb->s_magic = NFS_SUPER_MAGIC;
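Only superblocks that set s_export_op can be reached by file handle, which is also what knfsd needs in order to re-export the mount. After this hunk, NFSv3 always gets export operations, NFSv4 only when the server supports v4.1-style atomic opens, and NFSv4.0 never does. As a userspace illustration of what the flag enables, the name_to_handle_at()/open_by_handle_at() pair below should now fail (typically with EOPNOTSUPP) on an NFSv4.0 mount; the /mnt/nfs paths are assumptions and the reopen step needs CAP_DAC_READ_SEARCH:

/* Illustration only: obtain a file handle for a path and reopen the file
 * from that handle. On an NFSv4.0 mount this should now fail, since the
 * superblock no longer sets export operations. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/mnt/nfs";		/* assumed mount point */
	const char *path = "/mnt/nfs/testfile";	/* assumed test file */
	struct file_handle *fh;
	int mount_id, mount_fd, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");	/* expected without export ops */
		return 1;
	}

	mount_fd = open(dir, O_RDONLY | O_DIRECTORY);
	if (mount_fd < 0) {
		perror("open");
		return 1;
	}
	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	if (fd < 0)
		perror("open_by_handle_at");
	else
		printf("reopened %s by handle, fd=%d\n", path, fd);
	return 0;
}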
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1496,31 +1496,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
 	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-/*
- * Special version of should_remove_suid() that ignores capabilities.
- */
-static int nfs_should_remove_suid(const struct inode *inode)
-{
-	umode_t mode = inode->i_mode;
-	int kill = 0;
-
-	/* suid always must be killed */
-	if (unlikely(mode & S_ISUID))
-		kill = ATTR_KILL_SUID;
-
-	/*
-	 * sgid without any exec bits is just a mandatory locking mark; leave
-	 * it alone. If some exec bits are set, it's a real sgid; kill it.
-	 */
-	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-		kill |= ATTR_KILL_SGID;
-
-	if (unlikely(kill && S_ISREG(mode)))
-		return kill;
-
-	return 0;
-}
-
 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
 				       struct nfs_fattr *fattr)
 {
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2873,6 +2873,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
 	task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
 			&rpc_cb_add_xprt_call_ops, data);
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+
 	data->xps->xps_nunique_destaddr_xprts++;
 	rpc_put_task(task);
 success:
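The revert restores the bail-out when rpc_call_null_helper() fails, rather than dereferencing the returned pointer unconditionally. The helper reports failure through the kernel's error-pointer convention, where an errno value is encoded in the pointer itself. Below is a self-contained userspace sketch of that idiom; the ERR_PTR/IS_ERR/PTR_ERR helpers and start_task() are local reimplementations for illustration, not the kernel's:

/* Sketch of the error-pointer idiom used by rpc_call_null_helper()'s caller. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errno values live in the top page of the address space */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for rpc_call_null_helper(): may fail with -ENOMEM. */
static void *start_task(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(64);
}

int main(void)
{
	void *task = start_task(1);

	/* This is the check the revert puts back: bail out on error
	 * instead of treating the encoded errno as a valid task. */
	if (IS_ERR(task)) {
		printf("task creation failed: %ld\n", PTR_ERR(task));
		return 1;
	}
	free(task);
	return 0;
}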
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1179,11 +1179,8 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 
-	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
+	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
 		xprt_request_rb_remove(req->rq_xprt, req);
-		xdr_free_bvec(&req->rq_rcv_buf);
-		req->rq_private_buf.bvec = NULL;
-	}
 }
 
 /**
@@ -1221,6 +1218,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
 	xprt->stat.recvs++;
 
+	xdr_free_bvec(&req->rq_rcv_buf);
+	req->rq_private_buf.bvec = NULL;
 	req->rq_private_buf.len = copied;
 	/* Ensure all writes are done before we update */
 	/* req->rq_reply_bytes_recvd */
@@ -1453,6 +1452,7 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
 		xprt_request_dequeue_transmit_locked(task);
 		xprt_request_dequeue_receive_locked(task);
 		spin_unlock(&xprt->queue_lock);
+		xdr_free_bvec(&req->rq_rcv_buf);
 	}
 }
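Taken together, the three hunks stop freeing the receive buffer's bvec in the dequeue-receive path, where it could race with call_decode() finishing the same request (the source of the WARN_ON() mentioned in the pull summary), and instead free it at two quiescent points: once the reply has been copied in xprt_complete_rqst(), and at final teardown in xprt_request_dequeue_xprt(). A loose userspace analogy of that pattern is sketched below; every name in it is invented for illustration, with a pthread mutex standing in for xprt->queue_lock and an idempotent free helper standing in for xdr_free_bvec():

/* Loose analogy of the race fix: free a receive buffer only at the
 * completion point or at final teardown, never in a dequeue path that
 * can overlap with the completer.  Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	pthread_mutex_t lock;	/* stands in for xprt->queue_lock */
	char *rcv_buf;		/* stands in for the rq_rcv_buf bvec */
	int completed;
};

/* Idempotent, like xdr_free_bvec(): safe to call from either path. */
static void free_rcv_buf(struct request *req)
{
	free(req->rcv_buf);
	req->rcv_buf = NULL;
}

/* Completion path (analogous to xprt_complete_rqst). */
static void *complete_request(void *arg)
{
	struct request *req = arg;

	pthread_mutex_lock(&req->lock);
	if (!req->completed) {
		/* reply copied out; the scratch buffer is no longer needed */
		free_rcv_buf(req);
		req->completed = 1;
	}
	pthread_mutex_unlock(&req->lock);
	return NULL;
}

/* Teardown path (analogous to xprt_request_dequeue_xprt): frees only
 * after the request is off the queue, serialized against the completer. */
static void teardown_request(struct request *req)
{
	pthread_mutex_lock(&req->lock);
	free_rcv_buf(req);
	pthread_mutex_unlock(&req->lock);
}

int main(void)
{
	struct request req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.rcv_buf = malloc(4096),
	};
	pthread_t t;

	pthread_create(&t, NULL, complete_request, &req);
	pthread_join(t, NULL);
	teardown_request(&req);
	printf("buffer freed exactly once, completed=%d\n", req.completed);
	return 0;
}

The point of the sketch is only the placement of the free: it happens at the completion point or at teardown, never in a path that can run while the completer still uses the buffer.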