Commit 4e1c80ae authored by Linus Torvalds

Merge tag 'nfsd-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd updates from Chuck Lever:
 "The big ticket item for this release is that support for RPC-with-TLS
  [RFC 9289] has been added to the Linux NFS server.

  The goal is to provide a simple-to-deploy, low-overhead in-transit
  confidentiality and peer authentication mechanism. It can supplement
  NFS Kerberos and it can protect the use of legacy non-cryptographic
  user authentication flavors such as AUTH_SYS. The TLS Record protocol
  is handled entirely by kTLS, meaning it can use either software
  encryption or offload encryption to smart NICs.

  Aside from that, work continues on improving NFSD's open file cache.
  Among the many clean-ups in that area is a patch to convert the
  rhashtable to use the list-hashing version of that data structure"

* tag 'nfsd-6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (31 commits)
  NFSD: Handle new xprtsec= export option
  SUNRPC: Support TLS handshake in the server-side TCP socket code
  NFSD: Clean up xattr memory allocation flags
  NFSD: Fix problem of COMMIT and NFS4ERR_DELAY in infinite loop
  SUNRPC: Clear rq_xid when receiving a new RPC Call
  SUNRPC: Recognize control messages in server-side TCP socket code
  SUNRPC: Be even lazier about releasing pages
  SUNRPC: Convert svc_xprt_release() to the release_pages() API
  SUNRPC: Relocate svc_free_res_pages()
  nfsd: simplify the delayed disposal list code
  SUNRPC: Ignore return value of ->xpo_sendto
  SUNRPC: Ensure server-side sockets have a sock->file
  NFSD: Watch for rq_pages bounds checking errors in nfsd_splice_actor()
  sunrpc: simplify two-level sysctl registration for svcrdma_parm_table
  SUNRPC: return proper error from get_expiry()
  lockd: add some client-side tracepoints
  nfs: move nfs_fhandle_hash to common include file
  lockd: server should unlock lock if client rejects the grant
  lockd: fix races in client GRANTED_MSG wait logic
  lockd: move struct nlm_wait to lockd.h
  ...
parents 0127f25b 9280c577
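
The pull message's point that the TLS Record protocol is handled entirely by kTLS means the handshake itself runs in user space (a handshake agent such as tlshd), which then installs the negotiated keys on the already-connected socket; after that, ordinary socket I/O on the connection is encrypted and decrypted by the kernel, in software or offloaded to a capable NIC. A rough user-space sketch of that key-installation step follows, with placeholder key material and a hypothetical helper name; the authoritative interface is described in Documentation/networking/tls.rst.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282		/* not exposed by every libc */
#endif
#ifndef TCP_ULP
#define TCP_ULP 31		/* attach an upper-layer protocol to a TCP socket */
#endif

/*
 * @tx and @rx carry the per-direction traffic keys; the caller fills
 * .info.version, .info.cipher_type, key, iv, salt and rec_seq from the
 * secrets produced by the completed handshake.
 */
static int enable_ktls(int fd,
		       const struct tls12_crypto_info_aes_gcm_128 *tx,
		       const struct tls12_crypto_info_aes_gcm_128 *rx)
{
	/* Attach the "tls" ULP, then program each direction. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	if (setsockopt(fd, SOL_TLS, TLS_TX, tx, sizeof(*tx)))
		return -1;
	return setsockopt(fd, SOL_TLS, TLS_RX, rx, sizeof(*rx));
}

The server-side pieces in this merge (the xpo_handshake callback and the XPT_TLS_SESSION/XPT_PEER_AUTH transport flags further down) only coordinate when this upcall happens and record its outcome.
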
......@@ -3,10 +3,12 @@
# Makefile for the linux lock manager stuff
#
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_LOCKD) += lockd.o
lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
svcshare.o svcproc.o svcsubs.o mon.o xdr.o
lockd-objs-y += clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o
lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
lockd-objs-$(CONFIG_PROC_FS) += procfs.o
lockd-objs := $(lockd-objs-y)
......@@ -14,9 +14,12 @@
#include <linux/nfs_fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
#include "trace.h"
#define NLMDBG_FACILITY NLMDBG_CLIENT
/*
......@@ -29,18 +32,6 @@ static int reclaimer(void *ptr);
* client perspective.
*/
/*
* This is the representation of a blocked client lock.
*/
struct nlm_wait {
struct list_head b_list; /* linked list */
wait_queue_head_t b_wait; /* where to wait on */
struct nlm_host * b_host;
struct file_lock * b_lock; /* local file lock */
unsigned short b_reclaim; /* got to reclaim lock */
__be32 b_status; /* grant callback status */
};
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);
......@@ -94,41 +85,42 @@ void nlmclnt_done(struct nlm_host *host)
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
/*
* Queue up a lock for blocking so that the GRANTED request can see it
*/
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host, struct file_lock *fl)
{
struct nlm_wait *block;
block = kmalloc(sizeof(*block), GFP_KERNEL);
if (block != NULL) {
block->b_host = host;
block->b_lock = fl;
init_waitqueue_head(&block->b_wait);
block->b_status = nlm_lck_blocked;
}
/*
* Queue up a lock for blocking so that the GRANTED request can see it
*/
void nlmclnt_queue_block(struct nlm_wait *block)
{
spin_lock(&nlm_blocked_lock);
list_add(&block->b_list, &nlm_blocked);
spin_unlock(&nlm_blocked_lock);
}
return block;
}
void nlmclnt_finish_block(struct nlm_wait *block)
/*
* Dequeue the block and return its final status
*/
__be32 nlmclnt_dequeue_block(struct nlm_wait *block)
{
if (block == NULL)
return;
__be32 status;
spin_lock(&nlm_blocked_lock);
list_del(&block->b_list);
status = block->b_status;
spin_unlock(&nlm_blocked_lock);
kfree(block);
return status;
}
/*
* Block on a lock
*/
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
long ret;
......@@ -154,7 +146,6 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
/* Reset the lock status after a server reboot so we resend */
if (block->b_status == nlm_lck_denied_grace_period)
block->b_status = nlm_lck_blocked;
req->a_res.status = block->b_status;
return 0;
}
......@@ -198,6 +189,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
res = nlm_granted;
}
spin_unlock(&nlm_blocked_lock);
trace_nlmclnt_grant(lock, addr, svc_addr_len(addr), res);
return res;
}
......
......@@ -20,6 +20,8 @@
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include "trace.h"
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
#define NLMCLNT_POLL_TIMEOUT (30*HZ)
......@@ -451,6 +453,9 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
status = nlm_stat_to_errno(req->a_res.status);
}
out:
trace_nlmclnt_test(&req->a_args.lock,
(const struct sockaddr *)&req->a_host->h_addr,
req->a_host->h_addrlen, req->a_res.status);
nlmclnt_release_call(req);
return status;
}
......@@ -516,9 +521,10 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
const struct cred *cred = nfs_file_cred(fl->fl_file);
struct nlm_host *host = req->a_host;
struct nlm_res *resp = &req->a_res;
struct nlm_wait *block = NULL;
struct nlm_wait block;
unsigned char fl_flags = fl->fl_flags;
unsigned char fl_type;
__be32 b_status;
int status = -ENOLCK;
if (nsm_monitor(host) < 0)
......@@ -531,14 +537,21 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
if (status < 0)
goto out;
block = nlmclnt_prepare_block(host, fl);
nlmclnt_prepare_block(&block, host, fl);
again:
/*
* Initialise resp->status to a valid non-zero value,
* since 0 == nlm_lck_granted
*/
resp->status = nlm_lck_blocked;
for(;;) {
/*
* A GRANTED callback can come at any time -- even before the reply
* to the LOCK request arrives, so we queue the wait before
* requesting the lock.
*/
nlmclnt_queue_block(&block);
for (;;) {
/* Reboot protection */
fl->fl_u.nfs_fl.state = host->h_state;
status = nlmclnt_call(cred, req, NLMPROC_LOCK);
......@@ -550,12 +563,15 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
if (resp->status != nlm_lck_blocked)
break;
/* Wait on an NLM blocking lock */
status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
status = nlmclnt_wait(&block, req, NLMCLNT_POLL_TIMEOUT);
if (status < 0)
break;
if (resp->status != nlm_lck_blocked)
if (block.b_status != nlm_lck_blocked)
break;
}
b_status = nlmclnt_dequeue_block(&block);
if (resp->status == nlm_lck_blocked)
resp->status = b_status;
/* if we were interrupted while blocking, then cancel the lock request
* and exit
......@@ -564,7 +580,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
if (!req->a_args.block)
goto out_unlock;
if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
goto out_unblock;
goto out;
}
if (resp->status == nlm_granted) {
......@@ -593,16 +609,19 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
status = -ENOLCK;
else
status = nlm_stat_to_errno(resp->status);
out_unblock:
nlmclnt_finish_block(block);
out:
trace_nlmclnt_lock(&req->a_args.lock,
(const struct sockaddr *)&req->a_host->h_addr,
req->a_host->h_addrlen, req->a_res.status);
nlmclnt_release_call(req);
return status;
out_unlock:
/* Fatal error: ensure that we remove the lock altogether */
trace_nlmclnt_lock(&req->a_args.lock,
(const struct sockaddr *)&req->a_host->h_addr,
req->a_host->h_addrlen, req->a_res.status);
dprintk("lockd: lock attempt ended in fatal error.\n"
" Attempting to unlock.\n");
nlmclnt_finish_block(block);
fl_type = fl->fl_type;
fl->fl_type = F_UNLCK;
down_read(&host->h_rwsem);
......@@ -696,6 +715,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
/* What to do now? I'm out of my depth... */
status = -ENOLCK;
out:
trace_nlmclnt_unlock(&req->a_args.lock,
(const struct sockaddr *)&req->a_host->h_addr,
req->a_host->h_addrlen, req->a_res.status);
nlmclnt_release_call(req);
return status;
}
......
......@@ -629,6 +629,7 @@ nlm_shutdown_hosts_net(struct net *net)
rpc_shutdown_client(host->h_rpcclnt);
host->h_rpcclnt = NULL;
}
nlmsvc_free_host_resources(host);
}
/* Then, perform a garbage collection pass */
......
......@@ -954,19 +954,32 @@ void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
struct nlm_block *block;
struct file_lock *fl;
int error;
dprintk("grant_reply: looking for cookie %x, s=%d \n",
*(unsigned int *)(cookie->data), status);
if (!(block = nlmsvc_find_block(cookie)))
return;
if (status == nlm_lck_denied_grace_period) {
switch (status) {
case nlm_lck_denied_grace_period:
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
} else {
break;
case nlm_lck_denied:
/* Client doesn't want it, just unlock it */
nlmsvc_unlink_block(block);
fl = &block->b_call->a_args.lock.fl;
fl->fl_type = F_UNLCK;
error = vfs_lock_file(fl->fl_file, F_SETLK, fl, NULL);
if (error)
pr_warn("lockd: unable to unlock lock rejected by client!\n");
break;
default:
/*
* Lock is now held by client, or has been rejected.
* In both cases, the block should be removed.
* Either it was accepted or the status makes no sense;
* just unlink it either way.
*/
nlmsvc_unlink_block(block);
}
......
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "trace.h"
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM lockd
#if !defined(_TRACE_LOCKD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LOCKD_H
#include <linux/tracepoint.h>
#include <linux/crc32.h>
#include <linux/nfs.h>
#include <linux/lockd/lockd.h>
#ifdef CONFIG_LOCKD_V4
#define NLM_STATUS_LIST \
nlm_status_code(LCK_GRANTED) \
nlm_status_code(LCK_DENIED) \
nlm_status_code(LCK_DENIED_NOLOCKS) \
nlm_status_code(LCK_BLOCKED) \
nlm_status_code(LCK_DENIED_GRACE_PERIOD) \
nlm_status_code(DEADLCK) \
nlm_status_code(ROFS) \
nlm_status_code(STALE_FH) \
nlm_status_code(FBIG) \
nlm_status_code_end(FAILED)
#else
#define NLM_STATUS_LIST \
nlm_status_code(LCK_GRANTED) \
nlm_status_code(LCK_DENIED) \
nlm_status_code(LCK_DENIED_NOLOCKS) \
nlm_status_code(LCK_BLOCKED) \
nlm_status_code_end(LCK_DENIED_GRACE_PERIOD)
#endif
#undef nlm_status_code
#undef nlm_status_code_end
#define nlm_status_code(x) TRACE_DEFINE_ENUM(NLM_##x);
#define nlm_status_code_end(x) TRACE_DEFINE_ENUM(NLM_##x);
NLM_STATUS_LIST
#undef nlm_status_code
#undef nlm_status_code_end
#define nlm_status_code(x) { NLM_##x, #x },
#define nlm_status_code_end(x) { NLM_##x, #x }
#define show_nlm_status(x) __print_symbolic(x, NLM_STATUS_LIST)
DECLARE_EVENT_CLASS(nlmclnt_lock_event,
TP_PROTO(
const struct nlm_lock *lock,
const struct sockaddr *addr,
unsigned int addrlen,
__be32 status
),
TP_ARGS(lock, addr, addrlen, status),
TP_STRUCT__entry(
__field(u32, oh)
__field(u32, svid)
__field(u32, fh)
__field(unsigned long, status)
__field(u64, start)
__field(u64, len)
__sockaddr(addr, addrlen)
),
TP_fast_assign(
__entry->oh = ~crc32_le(0xffffffff, lock->oh.data, lock->oh.len);
__entry->svid = lock->svid;
__entry->fh = nfs_fhandle_hash(&lock->fh);
__entry->start = lock->lock_start;
__entry->len = lock->lock_len;
__entry->status = be32_to_cpu(status);
__assign_sockaddr(addr, addr, addrlen);
),
TP_printk(
"addr=%pISpc oh=0x%08x svid=0x%08x fh=0x%08x start=%llu len=%llu status=%s",
__get_sockaddr(addr), __entry->oh, __entry->svid,
__entry->fh, __entry->start, __entry->len,
show_nlm_status(__entry->status)
)
);
#define DEFINE_NLMCLNT_EVENT(name) \
DEFINE_EVENT(nlmclnt_lock_event, name, \
TP_PROTO( \
const struct nlm_lock *lock, \
const struct sockaddr *addr, \
unsigned int addrlen, \
__be32 status \
), \
TP_ARGS(lock, addr, addrlen, status))
DEFINE_NLMCLNT_EVENT(nlmclnt_test);
DEFINE_NLMCLNT_EVENT(nlmclnt_lock);
DEFINE_NLMCLNT_EVENT(nlmclnt_unlock);
DEFINE_NLMCLNT_EVENT(nlmclnt_grant);
#endif /* _TRACE_LOCKD_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
......@@ -149,7 +149,10 @@ const struct export_operations nfs_export_ops = {
.encode_fh = nfs_encode_fh,
.fh_to_dentry = nfs_fh_to_dentry,
.get_parent = nfs_get_parent,
.flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
EXPORT_OP_NOATOMIC_ATTR,
.flags = EXPORT_OP_NOWCC |
EXPORT_OP_NOSUBTREECHK |
EXPORT_OP_CLOSE_BEFORE_UNLINK |
EXPORT_OP_REMOTE_FS |
EXPORT_OP_NOATOMIC_ATTR |
EXPORT_OP_FLUSH_ON_CLOSE,
};
......@@ -855,27 +855,12 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts)
}
#ifdef CONFIG_CRC32
/**
* nfs_fhandle_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
*
* returns a crc32 hash for the filehandle that is compatible with
* the one displayed by "wireshark".
*/
static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
}
static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
{
return ~crc32_le(0xFFFFFFFF, &stateid->other[0],
NFS4_STATEID_OTHER_SIZE);
}
#else
static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return 0;
}
static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
{
return 0;
......
......@@ -123,8 +123,8 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
/* OK, we seem to have a valid key */
key.h.flags = 0;
key.h.expiry_time = get_expiry(&mesg);
if (key.h.expiry_time == 0)
err = get_expiry(&mesg, &key.h.expiry_time);
if (err)
goto out;
key.ek_client = dom;
......@@ -439,7 +439,6 @@ static int check_export(struct path *path, int *flags, unsigned char *uuid)
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_NFSD_V4
......@@ -546,6 +545,29 @@ static inline int
secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
#endif
static int xprtsec_parse(char **mesg, char *buf, struct svc_export *exp)
{
unsigned int i, mode, listsize;
int err;
err = get_uint(mesg, &listsize);
if (err)
return err;
if (listsize > NFSEXP_XPRTSEC_NUM)
return -EINVAL;
exp->ex_xprtsec_modes = 0;
for (i = 0; i < listsize; i++) {
err = get_uint(mesg, &mode);
if (err)
return err;
if (mode > NFSEXP_XPRTSEC_MTLS)
return -EINVAL;
exp->ex_xprtsec_modes |= mode;
}
return 0;
}
static inline int
nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
{
......@@ -608,11 +630,11 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
exp.ex_client = dom;
exp.cd = cd;
exp.ex_devid_map = NULL;
exp.ex_xprtsec_modes = NFSEXP_XPRTSEC_ALL;
/* expiry */
err = -EINVAL;
exp.h.expiry_time = get_expiry(&mesg);
if (exp.h.expiry_time == 0)
err = get_expiry(&mesg, &exp.h.expiry_time);
if (err)
goto out3;
/* flags */
......@@ -650,6 +672,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
else if (strcmp(buf, "secinfo") == 0)
err = secinfo_parse(&mesg, buf, &exp);
else if (strcmp(buf, "xprtsec") == 0)
err = xprtsec_parse(&mesg, buf, &exp);
else
/* quietly ignore unknown words and anything
* following. Newer user-space can try to set
......@@ -663,6 +687,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
err = check_export(&exp.ex_path, &exp.ex_flags, exp.ex_uuid);
if (err)
goto out4;
/*
* No point caching this if it would immediately expire.
* Also, this protects exportfs's dummy export from the
......@@ -824,6 +849,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
}
new->ex_xprtsec_modes = item->ex_xprtsec_modes;
}
static struct cache_head *svc_export_alloc(void)
......@@ -1035,9 +1061,26 @@ static struct svc_export *exp_find(struct cache_detail *cd,
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
struct svc_xprt *xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
goto ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
!test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
goto ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
goto ok;
}
goto denied;
ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
return 0;
......@@ -1062,6 +1105,7 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
if (nfsd4_spo_must_allow(rqstp))
return 0;
denied:
return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
}
......
......@@ -77,6 +77,7 @@ struct svc_export {
struct cache_detail *cd;
struct rcu_head ex_rcu;
struct export_stats ex_stats;
unsigned long ex_xprtsec_modes;
};
/* an "export key" (expkey) maps a filehandlefragement to an
......
......@@ -29,9 +29,8 @@ struct nfsd_file_mark {
* never be dereferenced, only used for comparison.
*/
struct nfsd_file {
struct rhash_head nf_rhash;
struct list_head nf_lru;
struct rcu_head nf_rcu;
struct rhlist_head nf_rlist;
void *nf_inode;
struct file *nf_file;
const struct cred *nf_cred;
struct net *nf_net;
......@@ -40,10 +39,12 @@ struct nfsd_file {
#define NFSD_FILE_REFERENCED (2)
#define NFSD_FILE_GC (3)
unsigned long nf_flags;
struct inode *nf_inode; /* don't deref */
refcount_t nf_ref;
unsigned char nf_may;
struct nfsd_file_mark *nf_mark;
struct list_head nf_lru;
struct rcu_head nf_rcu;
ktime_t nf_birthtime;
};
......
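
The nf_rlist field above is the list-hashing conversion mentioned in the pull message: where a plain rhashtable refuses duplicate keys, an rhltable chains every entry that shares a key (here, the inode) on one bucket. A minimal sketch of that API from <linux/rhashtable.h>, with structure and helper names invented for illustration rather than taken from fs/nfsd/filecache.c:

#include <linux/rhashtable.h>

struct demo_entry {
	void			*key;	/* e.g. the inode pointer */
	struct rhlist_head	list;	/* chains same-key entries */
};

static const struct rhashtable_params demo_params = {
	.key_len		= sizeof(void *),
	.key_offset		= offsetof(struct demo_entry, key),
	.head_offset		= offsetof(struct demo_entry, list),
	.automatic_shrinking	= true,
};

/* Set up once with rhltable_init(&demo_table, &demo_params). */
static struct rhltable demo_table;

/* Duplicate keys are allowed; they all land on one bucket chain. */
static int demo_add(struct demo_entry *e)
{
	return rhltable_insert(&demo_table, &e->list, demo_params);
}

/* Return the first entry under @key; caller holds rcu_read_lock(). */
static struct demo_entry *demo_find(void *key)
{
	struct rhlist_head *list, *tmp;
	struct demo_entry *e;

	list = rhltable_lookup(&demo_table, &key, demo_params);
	rhl_for_each_entry_rcu(e, tmp, list, list)
		return e;	/* further duplicates follow on the chain */
	return NULL;
}

Lookups return the head of the per-key chain, so the file cache can keep more than one nfsd_file for an inode and pick the right one by comparing fields such as nf_may and nf_cred shown above.
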
......@@ -240,8 +240,8 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
goto out;
/* expiry */
ent.h.expiry_time = get_expiry(&buf);
if (ent.h.expiry_time == 0)
error = get_expiry(&buf, &ent.h.expiry_time);
if (error)
goto out;
error = -ENOMEM;
......@@ -408,8 +408,8 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
memcpy(ent.name, buf1, sizeof(ent.name));
/* expiry */
ent.h.expiry_time = get_expiry(&buf);
if (ent.h.expiry_time == 0)
error = get_expiry(&buf, &ent.h.expiry_time);
if (error)
goto out;
/* ID */
......
......@@ -930,6 +930,9 @@ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
* Grab and keep cached pages associated with a file in the svc_rqst
* so that they can be passed to the network sendmsg/sendpage routines
* directly. They will be released after the sending has completed.
*
* Return values: Number of bytes consumed, or -EIO if there are no
* remaining pages in rqstp->rq_pages.
*/
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
......@@ -948,7 +951,8 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
*/
if (page == *(rqstp->rq_next_page - 1))
continue;
svc_rqst_replace_page(rqstp, page);
if (unlikely(!svc_rqst_replace_page(rqstp, page)))
return -EIO;
}
if (rqstp->rq_res.page_len == 0) // first call
rqstp->rq_res.page_base = offset % PAGE_SIZE;
......@@ -2164,7 +2168,7 @@ nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
goto out;
}
buf = kvmalloc(len, GFP_KERNEL | GFP_NOFS);
buf = kvmalloc(len, GFP_KERNEL);
if (buf == NULL) {
err = nfserr_jukebox;
goto out;
......@@ -2227,10 +2231,7 @@ nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
goto out;
}
/*
* We're holding i_rwsem - use GFP_NOFS.
*/
buf = kvmalloc(len, GFP_KERNEL | GFP_NOFS);
buf = kvmalloc(len, GFP_KERNEL);
if (buf == NULL) {
err = nfserr_jukebox;
goto out;
......
......@@ -220,6 +220,7 @@ struct export_operations {
#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
atomic attribute updates
*/
#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
unsigned long flags;
};
......
......@@ -99,21 +99,11 @@ struct nsm_handle {
/*
* Rigorous type checking on sockaddr type conversions
*/
static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host)
{
return (struct sockaddr_in *)&host->h_addr;
}
static inline struct sockaddr *nlm_addr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_addr;
}
static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host)
{
return (struct sockaddr_in *)&host->h_srcaddr;
}
static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_srcaddr;
......@@ -131,7 +121,16 @@ struct nlm_lockowner {
uint32_t pid;
};
struct nlm_wait;
/*
* This is the representation of a blocked client lock.
*/
struct nlm_wait {
struct list_head b_list; /* linked list */
wait_queue_head_t b_wait; /* where to wait on */
struct nlm_host *b_host;
struct file_lock *b_lock; /* local file lock */
__be32 b_status; /* grant callback status */
};
/*
* Memory chunk for NLM client RPC request.
......@@ -212,9 +211,11 @@ struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
void nlmclnt_release_call(struct nlm_rqst *);
struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
void nlmclnt_finish_block(struct nlm_wait *block);
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host,
struct file_lock *fl);
void nlmclnt_queue_block(struct nlm_wait *block);
__be32 nlmclnt_dequeue_block(struct nlm_wait *block);
int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
__be32 nlmclnt_grant(const struct sockaddr *addr,
const struct nlm_lock *lock);
void nlmclnt_recovery(struct nlm_host *);
......
......@@ -10,6 +10,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
/*
......@@ -44,4 +45,23 @@ enum nfs3_stable_how {
/* used by direct.c to mark verf as invalid */
NFS_INVALID_STABLE_HOW = -1
};
#ifdef CONFIG_CRC32
/**
* nfs_fhandle_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
*
* returns a crc32 hash for the filehandle that is compatible with
* the one displayed by "wireshark".
*/
static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
}
#else /* CONFIG_CRC32 */
static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return 0;
}
#endif /* CONFIG_CRC32 */
#endif /* _LINUX_NFS_H */
......@@ -300,17 +300,18 @@ static inline int get_time(char **bpp, time64_t *time)
return 0;
}
static inline time64_t get_expiry(char **bpp)
static inline int get_expiry(char **bpp, time64_t *rvp)
{
time64_t rv;
int error;
struct timespec64 boot;
if (get_time(bpp, &rv))
return 0;
if (rv < 0)
return 0;
error = get_time(bpp, rvp);
if (error)
return error;
getboottime64(&boot);
return rv - boot.tv_sec;
(*rvp) -= boot.tv_sec;
return 0;
}
#endif /* _LINUX_SUNRPC_CACHE_H_ */
......@@ -309,17 +309,6 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
while (rqstp->rq_next_page != rqstp->rq_respages) {
struct page **pp = --rqstp->rq_next_page;
if (*pp) {
put_page(*pp);
*pp = NULL;
}
}
}
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
......@@ -422,15 +411,16 @@ struct svc_serv *svc_create(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
struct svc_pool *pool, int node);
void svc_rqst_replace_page(struct svc_rqst *rqstp,
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
int svc_process(struct svc_rqst *);
void svc_process(struct svc_rqst *rqstp);
int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
struct svc_rqst *);
int svc_register(const struct svc_serv *, struct net *, const int,
......
......@@ -27,7 +27,7 @@ struct svc_xprt_ops {
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
void (*xpo_start_tls)(struct svc_xprt *);
void (*xpo_handshake)(struct svc_xprt *xprt);
};
struct svc_xprt_class {
......@@ -70,6 +70,9 @@ struct svc_xprt {
#define XPT_LOCAL 12 /* connection from loopback interface */
#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
#define XPT_CONG_CTRL 14 /* has congestion control */
#define XPT_HANDSHAKE 15 /* xprt requests a handshake */
#define XPT_TLS_SESSION 16 /* transport-layer security established */
#define XPT_PEER_AUTH 17 /* peer has been authenticated */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
......
......@@ -38,6 +38,8 @@ struct svc_sock {
/* Number of queued send requests */
atomic_t sk_sendqlen;
struct completion sk_handshake_done;
struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
......@@ -56,7 +58,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
*/
void svc_close_net(struct svc_serv *, struct net *);
int svc_recv(struct svc_rqst *, long);
int svc_send(struct svc_rqst *);
void svc_send(struct svc_rqst *rqstp);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
bool svc_alien_sock(struct net *net, int fd);
......
......@@ -69,6 +69,8 @@ extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
#define TLS_RECORD_TYPE_ALERT 0x15
#define TLS_RECORD_TYPE_HANDSHAKE 0x16
#define TLS_RECORD_TYPE_DATA 0x17
#define TLS_AAD_SPACE_SIZE 13
......
......@@ -1790,6 +1790,31 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
TP_PROTO(const struct svc_rqst *rqst, int status),
TP_ARGS(rqst, status));
TRACE_EVENT(svc_replace_page_err,
TP_PROTO(const struct svc_rqst *rqst),
TP_ARGS(rqst),
TP_STRUCT__entry(
SVC_RQST_ENDPOINT_FIELDS(rqst)
__field(const void *, begin)
__field(const void *, respages)
__field(const void *, nextpage)
),
TP_fast_assign(
SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst);
__entry->begin = rqst->rq_pages;
__entry->respages = rqst->rq_respages;
__entry->nextpage = rqst->rq_next_page;
),
TP_printk(SVC_RQST_ENDPOINT_FORMAT " begin=%p respages=%p nextpage=%p",
SVC_RQST_ENDPOINT_VARARGS,
__entry->begin, __entry->respages, __entry->nextpage)
);
TRACE_EVENT(svc_stats_latency,
TP_PROTO(
const struct svc_rqst *rqst
......@@ -1832,7 +1857,10 @@ TRACE_EVENT(svc_stats_latency,
{ BIT(XPT_CACHE_AUTH), "CACHE_AUTH" }, \
{ BIT(XPT_LOCAL), "LOCAL" }, \
{ BIT(XPT_KILL_TEMP), "KILL_TEMP" }, \
{ BIT(XPT_CONG_CTRL), "CONG_CTRL" })
{ BIT(XPT_CONG_CTRL), "CONG_CTRL" }, \
{ BIT(XPT_HANDSHAKE), "HANDSHAKE" }, \
{ BIT(XPT_TLS_SESSION), "TLS_SESSION" }, \
{ BIT(XPT_PEER_AUTH), "PEER_AUTH" })
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
......@@ -1965,6 +1993,17 @@ DEFINE_SVC_XPRT_EVENT(close);
DEFINE_SVC_XPRT_EVENT(detach);
DEFINE_SVC_XPRT_EVENT(free);
#define DEFINE_SVC_TLS_EVENT(name) \
DEFINE_EVENT(svc_xprt_event, svc_tls_##name, \
TP_PROTO(const struct svc_xprt *xprt), \
TP_ARGS(xprt))
DEFINE_SVC_TLS_EVENT(start);
DEFINE_SVC_TLS_EVENT(upcall);
DEFINE_SVC_TLS_EVENT(unavailable);
DEFINE_SVC_TLS_EVENT(not_started);
DEFINE_SVC_TLS_EVENT(timed_out);
TRACE_EVENT(svc_xprt_accept,
TP_PROTO(
const struct svc_xprt *xprt,
......
......@@ -62,5 +62,18 @@
| NFSEXP_ALLSQUASH \
| NFSEXP_INSECURE_PORT)
/*
* Transport layer security policies that are permitted to access
* an export
*/
#define NFSEXP_XPRTSEC_NONE 0x0001
#define NFSEXP_XPRTSEC_TLS 0x0002
#define NFSEXP_XPRTSEC_MTLS 0x0004
#define NFSEXP_XPRTSEC_NUM (3)
#define NFSEXP_XPRTSEC_ALL (NFSEXP_XPRTSEC_NONE | \
NFSEXP_XPRTSEC_TLS | \
NFSEXP_XPRTSEC_MTLS)
#endif /* _UAPINFSD_EXPORT_H */
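
These bits are the kernel-side encoding of the xprtsec= export option: user space passes the selected policies down through the export downcall parsed by xprtsec_parse() earlier in this diff. For illustration only, and assuming an nfs-utils release whose exports(5) already documents the option, an export restricted to TLS-protected connections might read:

/srv/nfs    *(rw,sync,xprtsec=tls:mtls)

A client that connects without the required TLS session is then refused by check_nfsd_access() in the export.c hunk above, with nfserr_acces for NFSv2/v3 or nfserr_wrongsec for NFSv4.
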
......@@ -257,11 +257,11 @@ static int rsi_parse(struct cache_detail *cd,
rsii.h.flags = 0;
/* expiry */
expiry = get_expiry(&mesg);
status = -EINVAL;
if (expiry == 0)
status = get_expiry(&mesg, &expiry);
if (status)
goto out;
status = -EINVAL;
/* major/minor */
len = qword_get(&mesg, buf, mlen);
if (len <= 0)
......@@ -483,11 +483,11 @@ static int rsc_parse(struct cache_detail *cd,
rsci.h.flags = 0;
/* expiry */
expiry = get_expiry(&mesg);
status = -EINVAL;
if (expiry == 0)
status = get_expiry(&mesg, &expiry);
if (status)
goto out;
status = -EINVAL;
rscp = rsc_lookup(cd, &rsci);
if (!rscp)
goto out;
......
......@@ -649,6 +649,8 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
if (!rqstp)
return rqstp;
pagevec_init(&rqstp->rq_pvec);
__set_bit(RQ_BUSY, &rqstp->rq_flags);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
......@@ -842,9 +844,21 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads);
*
* When replacing a page in rq_pages, batch the release of the
* replaced pages to avoid hammering the page allocator.
*
* Return values:
* %true: page replaced
* %false: array bounds checking failed
*/
void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
struct page **begin = rqstp->rq_pages;
struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES];
if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) {
trace_svc_replace_page_err(rqstp);
return false;
}
if (*rqstp->rq_next_page) {
if (!pagevec_space(&rqstp->rq_pvec))
__pagevec_release(&rqstp->rq_pvec);
......@@ -853,9 +867,28 @@ void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
get_page(page);
*(rqstp->rq_next_page++) = page;
return true;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);
/**
* svc_rqst_release_pages - Release Reply buffer pages
* @rqstp: RPC transaction context
*
* Release response pages that might still be in flight after
* svc_send, and any spliced filesystem-owned pages.
*/
void svc_rqst_release_pages(struct svc_rqst *rqstp)
{
int i, count = rqstp->rq_next_page - rqstp->rq_respages;
if (count) {
release_pages(rqstp->rq_respages, count);
for (i = 0; i < count; i++)
rqstp->rq_respages[i] = NULL;
}
}
/*
* Called from a server thread as it's exiting. Caller must hold the "service
* mutex" for the service.
......@@ -863,6 +896,7 @@ EXPORT_SYMBOL_GPL(svc_rqst_replace_page);
void
svc_rqst_free(struct svc_rqst *rqstp)
{
pagevec_release(&rqstp->rq_pvec);
svc_release_buffer(rqstp);
if (rqstp->rq_scratch_page)
put_page(rqstp->rq_scratch_page);
......@@ -1431,11 +1465,12 @@ svc_process_common(struct svc_rqst *rqstp)
goto sendit;
}
/*
* Process the RPC request.
/**
* svc_process - Execute one RPC transaction
* @rqstp: RPC transaction context
*
*/
int
svc_process(struct svc_rqst *rqstp)
void svc_process(struct svc_rqst *rqstp)
{
struct kvec *resv = &rqstp->rq_res.head[0];
__be32 *p;
......@@ -1471,7 +1506,8 @@ svc_process(struct svc_rqst *rqstp)
if (!svc_process_common(rqstp))
goto out_drop;
return svc_send(rqstp);
svc_send(rqstp);
return;
out_baddir:
svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
......@@ -1479,7 +1515,6 @@ svc_process(struct svc_rqst *rqstp)
rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
svc_drop(rqstp);
return 0;
}
EXPORT_SYMBOL_GPL(svc_process);
......
......@@ -427,7 +427,7 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
if (xpt_flags & BIT(XPT_BUSY))
return false;
if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE) | BIT(XPT_HANDSHAKE)))
return true;
if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
......@@ -541,8 +541,7 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
kfree(rqstp->rq_deferred);
rqstp->rq_deferred = NULL;
pagevec_release(&rqstp->rq_pvec);
svc_free_res_pages(rqstp);
svc_rqst_release_pages(rqstp);
rqstp->rq_res.page_len = 0;
rqstp->rq_res.page_base = 0;
......@@ -667,8 +666,6 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
struct xdr_buf *arg = &rqstp->rq_arg;
unsigned long pages, filled, ret;
pagevec_init(&rqstp->rq_pvec);
pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
if (pages > RPCSVC_MAXPAGES) {
pr_warn_once("svc: warning: pages=%lu > RPCSVC_MAXPAGES=%lu\n",
......@@ -704,6 +701,8 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
arg->page_len = (pages-2)*PAGE_SIZE;
arg->len = (pages-1)*PAGE_SIZE;
arg->tail[0].iov_len = 0;
rqstp->rq_xid = xdr_zero;
return 0;
}
......@@ -829,6 +828,9 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
module_put(xprt->xpt_class->xcl_owner);
}
svc_xprt_received(xprt);
} else if (test_bit(XPT_HANDSHAKE, &xprt->xpt_flags)) {
xprt->xpt_ops->xpo_handshake(xprt);
svc_xprt_received(xprt);
} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
/* XPT_DATA|XPT_DEFERRED case: */
dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
......@@ -909,18 +911,20 @@ void svc_drop(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_drop);
/*
* Return reply to client.
/**
* svc_send - Return reply to client
* @rqstp: RPC transaction context
*
*/
int svc_send(struct svc_rqst *rqstp)
void svc_send(struct svc_rqst *rqstp)
{
struct svc_xprt *xprt;
int len = -EFAULT;
struct xdr_buf *xb;
int status;
xprt = rqstp->rq_xprt;
if (!xprt)
goto out;
return;
/* calculate over-all length */
xb = &rqstp->rq_res;
......@@ -930,15 +934,10 @@ int svc_send(struct svc_rqst *rqstp)
trace_svc_xdr_sendto(rqstp->rq_xid, xb);
trace_svc_stats_latency(rqstp);
len = xprt->xpt_ops->xpo_sendto(rqstp);
status = xprt->xpt_ops->xpo_sendto(rqstp);
trace_svc_send(rqstp, len);
trace_svc_send(rqstp, status);
svc_xprt_release(rqstp);
if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
len = 0;
out:
return len;
}
/*
......
......@@ -17,8 +17,9 @@
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
#include <trace/events/sunrpc.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
#include "netns.h"
......@@ -225,9 +226,9 @@ static int ip_map_parse(struct cache_detail *cd,
return -EINVAL;
}
expiry = get_expiry(&mesg);
if (expiry ==0)
return -EINVAL;
err = get_expiry(&mesg, &expiry);
if (err)
return err;
/* domainname, or empty for NEGATIVE */
len = qword_get(&mesg, buf, mlen);
......@@ -506,9 +507,9 @@ static int unix_gid_parse(struct cache_detail *cd,
uid = make_kuid(current_user_ns(), id);
ug.uid = uid;
expiry = get_expiry(&mesg);
if (expiry == 0)
return -EINVAL;
err = get_expiry(&mesg, &expiry);
if (err)
return err;
rv = get_int(&mesg, &gids);
if (rv || gids < 0 || gids > 8192)
......@@ -832,6 +833,7 @@ svcauth_tls_accept(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct svc_cred *cred = &rqstp->rq_cred;
struct svc_xprt *xprt = rqstp->rq_xprt;
u32 flavor, len;
void *body;
__be32 *p;
......@@ -865,14 +867,19 @@ svcauth_tls_accept(struct svc_rqst *rqstp)
if (cred->cr_group_info == NULL)
return SVC_CLOSE;
if (rqstp->rq_xprt->xpt_ops->xpo_start_tls) {
if (xprt->xpt_ops->xpo_handshake) {
p = xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2 + 8);
if (!p)
return SVC_CLOSE;
trace_svc_tls_start(xprt);
*p++ = rpc_auth_null;
*p++ = cpu_to_be32(8);
memcpy(p, "STARTTLS", 8);
set_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
} else {
trace_svc_tls_unavailable(xprt);
if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
RPC_AUTH_NULL, NULL, 0) < 0)
return SVC_CLOSE;
......
......@@ -43,9 +43,12 @@
#include <net/udp.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#include <net/tls.h>
#include <net/handshake.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/ioctls.h>
#include <linux/key.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
......@@ -63,6 +66,12 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
/* To-do: to avoid tying up an nfsd thread while waiting for a
* handshake request, the request could instead be deferred.
*/
enum {
SVC_HANDSHAKE_TO = 5U * HZ
};
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
int flags);
......@@ -216,6 +225,49 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
return len;
}
static int
svc_tcp_sock_process_cmsg(struct svc_sock *svsk, struct msghdr *msg,
struct cmsghdr *cmsg, int ret)
{
if (cmsg->cmsg_level == SOL_TLS &&
cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
u8 content_type = *((u8 *)CMSG_DATA(cmsg));
switch (content_type) {
case TLS_RECORD_TYPE_DATA:
/* TLS sets EOR at the end of each application data
* record, even though there might be more frames
* waiting to be decrypted.
*/
msg->msg_flags &= ~MSG_EOR;
break;
case TLS_RECORD_TYPE_ALERT:
ret = -ENOTCONN;
break;
default:
ret = -EAGAIN;
}
}
return ret;
}
static int
svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
{
union {
struct cmsghdr cmsg;
u8 buf[CMSG_SPACE(sizeof(u8))];
} u;
int ret;
msg->msg_control = &u;
msg->msg_controllen = sizeof(u);
ret = sock_recvmsg(svsk->sk_sock, msg, MSG_DONTWAIT);
if (unlikely(msg->msg_controllen != sizeof(u)))
ret = svc_tcp_sock_process_cmsg(svsk, msg, &u.cmsg, ret);
return ret;
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
{
......@@ -263,7 +315,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
iov_iter_advance(&msg.msg_iter, seek);
buflen -= seek;
}
len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
len = svc_tcp_sock_recv_cmsg(svsk, &msg);
if (len > 0)
svc_flush_bvec(bvec, len, seek);
......@@ -315,6 +367,8 @@ static void svc_data_ready(struct sock *sk)
rmb();
svsk->sk_odata(sk);
trace_svcsock_data_ready(&svsk->sk_xprt, 0);
if (test_bit(XPT_HANDSHAKE, &svsk->sk_xprt.xpt_flags))
return;
if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
svc_xprt_enqueue(&svsk->sk_xprt);
}
......@@ -352,6 +406,88 @@ static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
sock_no_linger(svsk->sk_sock->sk);
}
/**
* svc_tcp_handshake_done - Handshake completion handler
* @data: address of xprt to wake
* @status: status of handshake
* @peerid: serial number of key containing the remote peer's identity
*
* If a security policy is specified as an export option, we don't
* have a specific export here to check. So we set a "TLS session
* is present" flag on the xprt and let an upper layer enforce local
* security policy.
*/
static void svc_tcp_handshake_done(void *data, int status, key_serial_t peerid)
{
struct svc_xprt *xprt = data;
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
if (!status) {
if (peerid != TLS_NO_PEERID)
set_bit(XPT_PEER_AUTH, &xprt->xpt_flags);
set_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
}
clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
complete_all(&svsk->sk_handshake_done);
}
/**
* svc_tcp_handshake - Perform a transport-layer security handshake
* @xprt: connected transport endpoint
*
*/
static void svc_tcp_handshake(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
struct sock *sk = svsk->sk_sock->sk;
struct tls_handshake_args args = {
.ta_sock = svsk->sk_sock,
.ta_done = svc_tcp_handshake_done,
.ta_data = xprt,
};
int ret;
trace_svc_tls_upcall(xprt);
clear_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
init_completion(&svsk->sk_handshake_done);
ret = tls_server_hello_x509(&args, GFP_KERNEL);
if (ret) {
trace_svc_tls_not_started(xprt);
goto out_failed;
}
ret = wait_for_completion_interruptible_timeout(&svsk->sk_handshake_done,
SVC_HANDSHAKE_TO);
if (ret <= 0) {
if (tls_handshake_cancel(sk)) {
trace_svc_tls_timed_out(xprt);
goto out_close;
}
}
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) {
trace_svc_tls_unavailable(xprt);
goto out_close;
}
/* Mark the transport ready in case the remote sent RPC
* traffic before the kernel received the handshake
* completion downcall.
*/
set_bit(XPT_DATA, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
return;
out_close:
set_bit(XPT_CLOSE, &xprt->xpt_flags);
out_failed:
clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
set_bit(XPT_DATA, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
}
/*
* See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
*/
......@@ -877,7 +1013,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
len = svc_tcp_sock_recv_cmsg(svsk, &msg);
if (len < 0)
return len;
svsk->sk_tcplen += len;
......@@ -1213,6 +1349,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
.xpo_has_wspace = svc_tcp_has_wspace,
.xpo_accept = svc_tcp_accept,
.xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
.xpo_handshake = svc_tcp_handshake,
};
static struct svc_xprt_class svc_tcp_class = {
......@@ -1293,27 +1430,38 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
struct socket *sock,
int flags)
{
struct file *filp = NULL;
struct svc_sock *svsk;
struct sock *inet;
int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
int err = 0;
svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
if (!svsk)
return ERR_PTR(-ENOMEM);
if (!sock->file) {
filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
if (IS_ERR(filp)) {
kfree(svsk);
return ERR_CAST(filp);
}
}
inet = sock->sk;
/* Register socket with portmapper */
if (pmap_register)
if (pmap_register) {
int err;
err = svc_register(serv, sock_net(sock->sk), inet->sk_family,
inet->sk_protocol,
ntohs(inet_sk(inet)->inet_sport));
if (err < 0) {
if (filp)
fput(filp);
kfree(svsk);
return ERR_PTR(err);
}
}
svsk->sk_sock = sock;
svsk->sk_sk = inet;
......@@ -1525,10 +1673,12 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
static void svc_sock_free(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
struct socket *sock = svsk->sk_sock;
if (svsk->sk_sock->file)
sockfd_put(svsk->sk_sock);
tls_handshake_cancel(sock->sk);
if (sock->file)
sockfd_put(sock);
else
sock_release(svsk->sk_sock);
sock_release(sock);
kfree(svsk);
}
......@@ -212,24 +212,6 @@ static struct ctl_table svcrdma_parm_table[] = {
{ },
};
static struct ctl_table svcrdma_table[] = {
{
.procname = "svc_rdma",
.mode = 0555,
.child = svcrdma_parm_table
},
{ },
};
static struct ctl_table svcrdma_root_table[] = {
{
.procname = "sunrpc",
.mode = 0555,
.child = svcrdma_table
},
{ },
};
static void svc_rdma_proc_cleanup(void)
{
if (!svcrdma_table_header)
......@@ -263,7 +245,8 @@ static int svc_rdma_proc_init(void)
if (rc)
goto out_err;
svcrdma_table_header = register_sysctl_table(svcrdma_root_table);
svcrdma_table_header = register_sysctl("sunrpc/svc_rdma",
svcrdma_parm_table);
return 0;
out_err:
......