Commit eff8313b authored by David S. Miller

Merge branch 'tcp-ao'

Dmitry Safonov says:

====================
net/tcp: Add TCP-AO support

This is version 16 of TCP-AO support. It addresses the build warning
in the middle of the patch set, reported by the kernel test robot.

There's one Sparse warning introduced by tcp_sigpool_start():
__cond_acquires() seems to be broken at the moment. I've described
the reasoning for it in the v9 cover letter. checkpatch.pl warnings
were also addressed, but I've left the ones that are more personal
preference (e.g., the 80-column limit). Please ping me if you have
a strong feeling about any of them.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cc54d2e2 7fe0e38b
......@@ -106,6 +106,7 @@ Contents:
sysfs-tagging
tc-actions-env-rules
tc-queue-filters
tcp_ao
tcp-thin
team
timestamping
......
This diff is collapsed.
......@@ -55,6 +55,29 @@ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
return copy_from_sockptr_offset(dst, src, 0, size);
}
static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
sockptr_t src, size_t usize)
{
size_t size = min(ksize, usize);
size_t rest = max(ksize, usize) - size;
if (!sockptr_is_kernel(src))
return copy_struct_from_user(dst, ksize, src.user, usize);
if (usize < ksize) {
memset(dst + size, 0, rest);
} else if (usize > ksize) {
/* Check the trailing bytes beyond ksize are all zero; they start
 * at src.kernel + size (== + ksize here), not at src.kernel. */
char *p = src.kernel + size;
while (rest--) {
if (*p++)
return -E2BIG;
}
}
memcpy(dst, src.kernel, size);
return 0;
}
static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
const void *src, size_t size)
{
......
......@@ -166,6 +166,11 @@ struct tcp_request_sock {
* after data-in-SYN.
*/
u8 syn_tos;
#ifdef CONFIG_TCP_AO
u8 ao_keyid;
u8 ao_rcv_next;
u8 maclen;
#endif
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
......@@ -173,6 +178,19 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
return (struct tcp_request_sock *)req;
}
static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
/* The real length of MAC is saved in the request socket,
* signing anything with zero-length makes no sense, so here is
* a little hack..
*/
#ifndef CONFIG_TCP_AO
return false;
#else
return tcp_rsk(req)->maclen != 0;
#endif
}
#define TCP_RMEM_TO_WIN_SCALE 8
struct tcp_sock {
......@@ -447,13 +465,18 @@ struct tcp_sock {
bool syn_smc; /* SYN includes SMC */
#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */
const struct tcp_sock_af_ops *af_specific;
#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info __rcu *md5sig_info;
#endif
#ifdef CONFIG_TCP_AO
struct tcp_ao_info __rcu *ao_info;
#endif
#endif
/* TCP fastopen related information */
struct tcp_fastopen_request *fastopen_req;
......@@ -509,6 +532,9 @@ struct tcp_timewait_sock {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
#ifdef CONFIG_TCP_AO
struct tcp_ao_info __rcu *ao_info;
#endif
};
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
......
......@@ -20,9 +20,14 @@
FN(IP_NOPROTO) \
FN(SOCKET_RCVBUFF) \
FN(PROTO_MEM) \
FN(TCP_AUTH_HDR) \
FN(TCP_MD5NOTFOUND) \
FN(TCP_MD5UNEXPECTED) \
FN(TCP_MD5FAILURE) \
FN(TCP_AONOTFOUND) \
FN(TCP_AOUNEXPECTED) \
FN(TCP_AOKEYNOTFOUND) \
FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
FN(TCP_ZEROWINDOW) \
......@@ -142,6 +147,11 @@ enum skb_drop_reason {
* drop out of udp_memory_allocated.
*/
SKB_DROP_REASON_PROTO_MEM,
/**
* @SKB_DROP_REASON_TCP_AUTH_HDR: a TCP-MD5 or TCP-AO hash appears
* twice or is set incorrectly.
*/
SKB_DROP_REASON_TCP_AUTH_HDR,
/**
* @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected,
* corresponding to LINUX_MIB_TCPMD5NOTFOUND
......@@ -157,6 +167,26 @@ enum skb_drop_reason {
* to LINUX_MIB_TCPMD5FAILURE
*/
SKB_DROP_REASON_TCP_MD5FAILURE,
/**
* @SKB_DROP_REASON_TCP_AONOTFOUND: no TCP-AO hash and one was expected,
* corresponding to LINUX_MIB_TCPAOREQUIRED
*/
SKB_DROP_REASON_TCP_AONOTFOUND,
/**
* @SKB_DROP_REASON_TCP_AOUNEXPECTED: TCP-AO hash is present and it
* was not expected, corresponding to LINUX_MIB_TCPAOKEYNOTFOUND
*/
SKB_DROP_REASON_TCP_AOUNEXPECTED,
/**
* @SKB_DROP_REASON_TCP_AOKEYNOTFOUND: TCP-AO key is unknown,
* corresponding to LINUX_MIB_TCPAOKEYNOTFOUND
*/
SKB_DROP_REASON_TCP_AOKEYNOTFOUND,
/**
* @SKB_DROP_REASON_TCP_AOFAILURE: TCP-AO hash is wrong,
* corresponding to LINUX_MIB_TCPAOBAD
*/
SKB_DROP_REASON_TCP_AOFAILURE,
/**
* @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog (
* see LINUX_MIB_TCPBACKLOGDROP)
......
This diff is collapsed.
This diff is collapsed.
......@@ -297,6 +297,11 @@ enum
LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */
LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */
LINUX_MIB_TCPPLBREHASH, /* TCPPLBRehash */
LINUX_MIB_TCPAOREQUIRED, /* TCPAORequired */
LINUX_MIB_TCPAOBAD, /* TCPAOBad */
LINUX_MIB_TCPAOKEYNOTFOUND, /* TCPAOKeyNotFound */
LINUX_MIB_TCPAOGOOD, /* TCPAOGood */
LINUX_MIB_TCPAODROPPEDICMPS, /* TCPAODroppedIcmps */
__LINUX_MIB_MAX
};
......
......@@ -129,6 +129,11 @@ enum {
#define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */
#define TCP_AO_ADD_KEY 38 /* Add/Set MKT */
#define TCP_AO_DEL_KEY 39 /* Delete MKT */
#define TCP_AO_INFO 40 /* Set/list TCP-AO per-socket options */
#define TCP_AO_GET_KEYS 41 /* List MKT(s) */
#define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */
#define TCP_REPAIR_ON 1
#define TCP_REPAIR_OFF 0
......@@ -361,6 +366,106 @@ struct tcp_diag_md5sig {
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
};
#define TCP_AO_MAXKEYLEN 80
#define TCP_AO_KEYF_IFINDEX (1 << 0) /* L3 ifindex for VRF */
#define TCP_AO_KEYF_EXCLUDE_OPT (1 << 1) /* "Indicates whether TCP
* options other than TCP-AO
* are included in the MAC
* calculation"
*/
struct tcp_ao_add { /* setsockopt(TCP_AO_ADD_KEY) */
struct __kernel_sockaddr_storage addr; /* peer's address for the key */
char alg_name[64]; /* crypto hash algorithm to use */
__s32 ifindex; /* L3 dev index for VRF */
__u32 set_current :1, /* set key as Current_key at once */
set_rnext :1, /* request it from peer with RNext_key */
reserved :30; /* must be 0 */
__u16 reserved2; /* padding, must be 0 */
__u8 prefix; /* peer's address prefix */
__u8 sndid; /* SendID for outgoing segments */
__u8 rcvid; /* RecvID to match for incoming seg */
__u8 maclen; /* length of authentication code (hash) */
__u8 keyflags; /* see TCP_AO_KEYF_ */
__u8 keylen; /* length of ::key */
__u8 key[TCP_AO_MAXKEYLEN];
} __attribute__((aligned(8)));
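
A usage sketch, not part of this patch set: installing an MKT from
userspace with the struct above could look like the following. The
peer address, Send/RecvIDs, and key bytes are made-up values, and the
headers assume a kernel with this series' UAPI installed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

int main(void)
{
	struct tcp_ao_add ao = {};
	struct sockaddr_in *peer = (struct sockaddr_in *)&ao.addr;
	int sk = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

	if (sk < 0)
		return 1;

	peer->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
	ao.prefix = 32;				/* exact peer match */
	strcpy(ao.alg_name, "hmac(sha1)");	/* RFC 5926 mandatory alg */
	ao.sndid = 100;				/* must equal the peer's RecvID */
	ao.rcvid = 100;				/* must equal the peer's SendID */
	ao.keylen = sizeof("testkey") - 1;
	memcpy(ao.key, "testkey", ao.keylen);	/* maclen 0: algorithm default */

	/* The first TCP-AO setsockopt() must happen while the socket is
	 * in CLOSE/LISTEN state, see the do_tcp_setsockopt() hunk below. */
	if (setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &ao, sizeof(ao)))
		perror("setsockopt(TCP_AO_ADD_KEY)");

	close(sk);
	return 0;
}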
struct tcp_ao_del { /* setsockopt(TCP_AO_DEL_KEY) */
struct __kernel_sockaddr_storage addr; /* peer's address for the key */
__s32 ifindex; /* L3 dev index for VRF */
__u32 set_current :1, /* corresponding ::current_key */
set_rnext :1, /* corresponding ::rnext */
del_async :1, /* only valid for listen sockets */
reserved :29; /* must be 0 */
__u16 reserved2; /* padding, must be 0 */
__u8 prefix; /* peer's address prefix */
__u8 sndid; /* SendID for outgoing segments */
__u8 rcvid; /* RecvID to match for incoming seg */
__u8 current_key; /* KeyID to set as Current_key */
__u8 rnext; /* KeyID to set as Rnext_key */
__u8 keyflags; /* see TCP_AO_KEYF_ */
} __attribute__((aligned(8)));
struct tcp_ao_info_opt { /* setsockopt(TCP_AO_INFO), getsockopt(TCP_AO_INFO) */
/* Here 'in' is for setsockopt(), 'out' is for getsockopt() */
__u32 set_current :1, /* in/out: corresponding ::current_key */
set_rnext :1, /* in/out: corresponding ::rnext */
ao_required :1, /* in/out: don't accept non-AO connects */
set_counters :1, /* in: set/clear ::pkt_* counters */
accept_icmps :1, /* in/out: accept incoming ICMPs */
reserved :27; /* must be 0 */
__u16 reserved2; /* padding, must be 0 */
__u8 current_key; /* in/out: KeyID of Current_key */
__u8 rnext; /* in/out: keyid of RNext_key */
__u64 pkt_good; /* in/out: verified segments */
__u64 pkt_bad; /* in/out: failed verification */
__u64 pkt_key_not_found; /* in/out: could not find a key to verify */
__u64 pkt_ao_required; /* in/out: segments missing TCP-AO sign */
__u64 pkt_dropped_icmp; /* in/out: ICMPs that were ignored */
} __attribute__((aligned(8)));
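
Another hedged sketch: using TCP_AO_INFO to require signed segments on
a not-yet-connected socket, then reading the verification counters
back. The helper name and socket setup are assumptions; only the
struct fields come from the definition above.

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

/* Require TCP-AO from peers of 'sk', then read the counters back. */
static int ao_require_and_count(int sk)
{
	struct tcp_ao_info_opt info = {};
	socklen_t len = sizeof(info);

	info.ao_required = 1;	/* refuse peers that don't use TCP-AO */
	if (setsockopt(sk, IPPROTO_TCP, TCP_AO_INFO, &info, sizeof(info)))
		return -1;

	if (getsockopt(sk, IPPROTO_TCP, TCP_AO_INFO, &info, &len))
		return -1;

	printf("good=%llu bad=%llu keyless=%llu\n",
	       (unsigned long long)info.pkt_good,
	       (unsigned long long)info.pkt_bad,
	       (unsigned long long)info.pkt_key_not_found);
	return 0;
}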
struct tcp_ao_getsockopt { /* getsockopt(TCP_AO_GET_KEYS) */
struct __kernel_sockaddr_storage addr; /* in/out: dump keys for peer
* with this address/prefix
*/
char alg_name[64]; /* out: crypto hash algorithm */
__u8 key[TCP_AO_MAXKEYLEN];
__u32 nkeys; /* in: size of the userspace buffer
* @optval, measured in @optlen - the
* sizeof(struct tcp_ao_getsockopt)
* out: number of keys that matched
*/
__u16 is_current :1, /* in: match and dump Current_key,
* out: the dumped key is Current_key
*/
is_rnext :1, /* in: match and dump RNext_key,
* out: the dumped key is RNext_key
*/
get_all :1, /* in: dump all keys */
reserved :13; /* padding, must be 0 */
__u8 sndid; /* in/out: dump keys with SendID */
__u8 rcvid; /* in/out: dump keys with RecvID */
__u8 prefix; /* in/out: dump keys with address/prefix */
__u8 maclen; /* out: key's length of authentication
* code (hash)
*/
__u8 keyflags; /* in/out: see TCP_AO_KEYF_ */
__u8 keylen; /* out: length of ::key */
__s32 ifindex; /* in/out: L3 dev index for VRF */
__u64 pkt_good; /* out: verified segments */
__u64 pkt_bad; /* out: segments that failed verification */
} __attribute__((aligned(8)));
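
Per the ::nkeys comment above, the key dump takes an array of these
structs with optlen set to one element's size. A sketch (the capacity
of 8 is arbitrary, the helper name is made up):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

/* Dump every MKT installed on 'sk'. */
static int ao_dump_keys(int sk)
{
	struct tcp_ao_getsockopt keys[8];
	socklen_t len = sizeof(keys[0]);	/* element size, per ::nkeys */
	unsigned int i;

	memset(keys, 0, sizeof(keys));
	keys[0].nkeys = 8;	/* in: buffer capacity, in elements */
	keys[0].get_all = 1;	/* in: match every key */

	if (getsockopt(sk, IPPROTO_TCP, TCP_AO_GET_KEYS, keys, &len))
		return -1;

	for (i = 0; i < keys[0].nkeys && i < 8; i++)
		printf("sndid=%u rcvid=%u alg=%s keylen=%u\n",
		       keys[i].sndid, keys[i].rcvid,
		       keys[i].alg_name, keys[i].keylen);
	return 0;
}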
struct tcp_ao_repair { /* {s,g}etsockopt(TCP_AO_REPAIR) */
__be32 snt_isn;
__be32 rcv_isn;
__u32 snd_sne;
__u32 rcv_sne;
} __attribute__((aligned(8)));
/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
#define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1
......
......@@ -741,10 +741,27 @@ config DEFAULT_TCP_CONG
default "bbr" if DEFAULT_BBR
default "cubic"
config TCP_SIGPOOL
tristate
config TCP_AO
bool "TCP: Authentication Option (RFC5925)"
select CRYPTO
select TCP_SIGPOOL
depends on 64BIT && IPV6 != m # seq-number extension needs WRITE_ONCE(u64)
help
TCP-AO specifies the use of stronger Message Authentication Codes (MACs),
protects against replays for long-lived TCP connections, and
provides more details on the association of security with TCP
connections than TCP MD5 (see RFC 5925).
If unsure, say N.
config TCP_MD5SIG
bool "TCP: MD5 Signature Option support (RFC2385)"
select CRYPTO
select CRYPTO_MD5
select TCP_SIGPOOL
help
RFC2385 specifies a method of giving MD5 protection to TCP sessions.
Its main (only?) use is to protect BGP sessions between core routers
......
......@@ -62,12 +62,14 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_TCP_SIGPOOL) += tcp_sigpool.o
obj-$(CONFIG_NET_SOCK_MSG) += tcp_bpf.o
obj-$(CONFIG_BPF_SYSCALL) += udp_bpf.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o xfrm4_protocol.o
obj-$(CONFIG_TCP_AO) += tcp_ao.o
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_tcp_ca.o
......
......@@ -299,6 +299,11 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS),
SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE),
SNMP_MIB_ITEM("TCPPLBRehash", LINUX_MIB_TCPPLBREHASH),
SNMP_MIB_ITEM("TCPAORequired", LINUX_MIB_TCPAOREQUIRED),
SNMP_MIB_ITEM("TCPAOBad", LINUX_MIB_TCPAOBAD),
SNMP_MIB_ITEM("TCPAOKeyNotFound", LINUX_MIB_TCPAOKEYNOTFOUND),
SNMP_MIB_ITEM("TCPAOGood", LINUX_MIB_TCPAOGOOD),
SNMP_MIB_ITEM("TCPAODroppedIcmps", LINUX_MIB_TCPAODROPPEDICMPS),
SNMP_MIB_SENTINEL
};
......
......@@ -344,6 +344,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
__u8 rcv_wscale;
struct flowi4 fl4;
u32 tsoff = 0;
int l3index;
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
......@@ -405,6 +406,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
tcp_ao_syncookie(sk, skb, treq, AF_INET, l3index);
/* We threw the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
*/
......
......@@ -3593,6 +3593,31 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
__tcp_sock_set_quickack(sk, val);
break;
case TCP_AO_REPAIR:
err = tcp_ao_set_repair(sk, optval, optlen);
break;
#ifdef CONFIG_TCP_AO
case TCP_AO_ADD_KEY:
case TCP_AO_DEL_KEY:
case TCP_AO_INFO: {
/* If this is the first TCP-AO setsockopt() on the socket,
* sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR
* in any state.
*/
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
goto ao_parse;
if (rcu_dereference_protected(tcp_sk(sk)->ao_info,
lockdep_sock_is_held(sk)))
goto ao_parse;
if (tp->repair)
goto ao_parse;
err = -EISCONN;
break;
ao_parse:
err = tp->af_specific->ao_parse(sk, optname, optval, optlen);
break;
}
#endif
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
case TCP_MD5SIG_EXT:
......@@ -4267,6 +4292,21 @@ int do_tcp_getsockopt(struct sock *sk, int level,
return err;
}
#endif
case TCP_AO_REPAIR:
return tcp_ao_get_repair(sk, optval, optlen);
case TCP_AO_GET_KEYS:
case TCP_AO_INFO: {
int err;
sockopt_lock_sock(sk);
if (optname == TCP_AO_GET_KEYS)
err = tcp_ao_get_mkts(sk, optval, optlen);
else
err = tcp_ao_get_sock_info(sk, optval, optlen);
sockopt_release_sock(sk);
return err;
}
default:
return -ENOPROTOOPT;
}
......@@ -4305,141 +4345,52 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
EXPORT_SYMBOL(tcp_getsockopt);
#ifdef CONFIG_TCP_MD5SIG
static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
static DEFINE_MUTEX(tcp_md5sig_mutex);
static bool tcp_md5sig_pool_populated = false;
int tcp_md5_sigpool_id = -1;
EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id);
static void __tcp_alloc_md5sig_pool(void)
int tcp_md5_alloc_sigpool(void)
{
struct crypto_ahash *hash;
int cpu;
hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash))
return;
for_each_possible_cpu(cpu) {
void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
struct ahash_request *req;
if (!scratch) {
scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
sizeof(struct tcphdr),
GFP_KERNEL,
cpu_to_node(cpu));
if (!scratch)
return;
per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
}
if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
continue;
req = ahash_request_alloc(hash, GFP_KERNEL);
if (!req)
return;
ahash_request_set_callback(req, 0, NULL, NULL);
per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
}
/* before setting tcp_md5sig_pool_populated, we must commit all writes
* to memory. See smp_rmb() in tcp_get_md5sig_pool()
*/
smp_wmb();
/* Paired with READ_ONCE() from tcp_alloc_md5sig_pool()
* and tcp_get_md5sig_pool().
*/
WRITE_ONCE(tcp_md5sig_pool_populated, true);
}
bool tcp_alloc_md5sig_pool(void)
{
/* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) {
mutex_lock(&tcp_md5sig_mutex);
if (!tcp_md5sig_pool_populated)
__tcp_alloc_md5sig_pool();
size_t scratch_size;
int ret;
mutex_unlock(&tcp_md5sig_mutex);
scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr);
ret = tcp_sigpool_alloc_ahash("md5", scratch_size);
if (ret >= 0) {
/* As long as any md5 sigpool was allocated, the return
* id would stay the same. Re-write the id only for the case
* when previously all MD5 keys were deleted and this call
* allocates the first MD5 key, which may return a different
* sigpool id than was used previously.
*/
WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Avoids the compiler potentially being smart here */
return 0;
}
/* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
return READ_ONCE(tcp_md5sig_pool_populated);
return ret;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
/**
* tcp_get_md5sig_pool - get md5sig_pool for this user
*
* We use percpu structure, so if we succeed, we exit with preemption
* and BH disabled, to make sure another thread or softirq handling
won't try to get the same context.
*/
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
void tcp_md5_release_sigpool(void)
{
local_bh_disable();
/* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */
if (READ_ONCE(tcp_md5sig_pool_populated)) {
/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
smp_rmb();
return this_cpu_ptr(&tcp_md5sig_pool);
}
local_bh_enable();
return NULL;
tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id));
}
EXPORT_SYMBOL(tcp_get_md5sig_pool);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
const struct sk_buff *skb, unsigned int header_len)
void tcp_md5_add_sigpool(void)
{
struct scatterlist sg;
const struct tcphdr *tp = tcp_hdr(skb);
struct ahash_request *req = hp->md5_req;
unsigned int i;
const unsigned int head_data_len = skb_headlen(skb) > header_len ?
skb_headlen(skb) - header_len : 0;
const struct skb_shared_info *shi = skb_shinfo(skb);
struct sk_buff *frag_iter;
sg_init_table(&sg, 1);
sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
ahash_request_set_crypt(req, &sg, NULL, head_data_len);
if (crypto_ahash_update(req))
return 1;
for (i = 0; i < shi->nr_frags; ++i) {
const skb_frag_t *f = &shi->frags[i];
unsigned int offset = skb_frag_off(f);
struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
sg_set_page(&sg, page, skb_frag_size(f),
offset_in_page(offset));
ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
if (crypto_ahash_update(req))
return 1;
}
skb_walk_frags(skb, frag_iter)
if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
return 1;
return 0;
tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id));
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
int tcp_md5_hash_key(struct tcp_sigpool *hp,
const struct tcp_md5sig_key *key)
{
u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
struct scatterlist sg;
sg_init_one(&sg, key->key, keylen);
ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
ahash_request_set_crypt(hp->req, &sg, NULL, keylen);
/* We use data_race() because tcp_md5_do_add() might change key->key under us */
return data_race(crypto_ahash_update(hp->md5_req));
/* We use data_race() because tcp_md5_do_add() might change
* key->key under us
*/
return data_race(crypto_ahash_update(hp->req));
}
EXPORT_SYMBOL(tcp_md5_hash_key);
......@@ -4447,42 +4398,24 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
int family, int dif, int sdif)
int family, int l3index, const __u8 *hash_location)
{
/*
* This gets called for each TCP segment that arrives
* so we want to be efficient.
/* This gets called for each TCP segment that has TCP-MD5 option.
* We have 3 drop cases:
* o No MD5 hash and one expected.
* o MD5 hash and we're not expecting one.
* o MD5 hash and it's wrong.
*/
const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct tcphdr *th = tcp_hdr(skb);
const struct tcp_sock *tp = tcp_sk(sk);
int genhash, l3index;
struct tcp_md5sig_key *key;
u8 newhash[16];
int genhash;
/* sdif set, means packet ingressed via a device
* in an L3 domain and dif is set to the l3mdev
*/
l3index = sdif ? dif : 0;
hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family);
hash_location = tcp_parse_md5sig_option(th);
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
return SKB_NOT_DROPPED_YET;
if (hash_expected && !hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return SKB_DROP_REASON_TCP_MD5NOTFOUND;
}
key = tcp_md5_do_lookup(sk, l3index, saddr, family);
if (!hash_expected && hash_location) {
if (!key && hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
tcp_hash_fail("Unexpected MD5 Hash found", family, skb, "");
return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
}
......@@ -4491,27 +4424,26 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
* IPv4-mapped case.
*/
if (family == AF_INET)
genhash = tcp_v4_md5_hash_skb(newhash,
hash_expected,
NULL, skb);
genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
else
genhash = tp->af_specific->calc_md5_hash(newhash,
hash_expected,
genhash = tp->af_specific->calc_md5_hash(newhash, key,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
if (family == AF_INET) {
net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
saddr, ntohs(th->source),
daddr, ntohs(th->dest),
genhash ? " tcp_v4_calc_md5_hash failed"
: "", l3index);
tcp_hash_fail("MD5 Hash failed", AF_INET, skb, "%s L3 index %d",
genhash ? "tcp_v4_calc_md5_hash failed"
: "", l3index);
} else {
net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
genhash ? "failed" : "mismatch",
saddr, ntohs(th->source),
daddr, ntohs(th->dest), l3index);
if (genhash) {
tcp_hash_fail("MD5 Hash failed",
AF_INET6, skb, "L3 index %d",
l3index);
} else {
tcp_hash_fail("MD5 Hash mismatch",
AF_INET6, skb, "L3 index %d",
l3index);
}
}
return SKB_DROP_REASON_TCP_MD5FAILURE;
}
......
This diff is collapsed.
......@@ -3572,6 +3572,21 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp,
(ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin));
}
static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
{
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
if (!static_branch_unlikely(&tcp_ao_needed.key))
return;
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
if (ao && ack < tp->snd_una)
ao->snd_sne++;
#endif
}
/* If we update tp->snd_una, also update tp->bytes_acked */
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
......@@ -3579,9 +3594,25 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
sock_owned_by_me((struct sock *)tp);
tp->bytes_acked += delta;
tcp_snd_sne_update(tp, ack);
tp->snd_una = ack;
}
static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
{
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
if (!static_branch_unlikely(&tcp_ao_needed.key))
return;
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held((struct sock *)tp));
if (ao && seq < tp->rcv_nxt)
ao->rcv_sne++;
#endif
}
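
Both helpers above implement RFC 5925's Sequence Number Extension
(SNE): snd_una/rcv_nxt only move forward, so a new value that compares
as numerically smaller in unsigned 32-bit arithmetic means the
sequence space just wrapped, and the upper 32 bits kept in
ao->snd_sne/ao->rcv_sne get bumped. A standalone sketch of that
trigger, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snd_una = 0xfffffff0u;	/* just below the 32-bit wrap */
	uint32_t ack = 0x00000010u;	/* 0x20 bytes further, post-wrap */
	uint64_t sne = 0;

	if (ack < snd_una)	/* wrapped: extend into the upper 32 bits */
		sne++;
	printf("sne=%llu\n", (unsigned long long)sne);
	return 0;
}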
/* If we update tp->rcv_nxt, also update tp->bytes_received */
static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{
......@@ -3589,6 +3620,7 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
sock_owned_by_me((struct sock *)tp);
tp->bytes_received += delta;
tcp_rcv_sne_update(tp, seq);
WRITE_ONCE(tp->rcv_nxt, seq);
}
......@@ -4255,39 +4287,58 @@ static bool tcp_fast_parse_options(const struct net *net,
return true;
}
#ifdef CONFIG_TCP_MD5SIG
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/*
* Parse MD5 Signature option
* Parse Signature options
*/
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
int tcp_do_parse_auth_options(const struct tcphdr *th,
const u8 **md5_hash, const u8 **ao_hash)
{
int length = (th->doff << 2) - sizeof(*th);
const u8 *ptr = (const u8 *)(th + 1);
unsigned int minlen = TCPOLEN_MD5SIG;
if (IS_ENABLED(CONFIG_TCP_AO))
minlen = sizeof(struct tcp_ao_hdr) + 1;
*md5_hash = NULL;
*ao_hash = NULL;
/* If not enough data remaining, we can short cut */
while (length >= TCPOLEN_MD5SIG) {
while (length >= minlen) {
int opcode = *ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return NULL;
return 0;
case TCPOPT_NOP:
length--;
continue;
default:
opsize = *ptr++;
if (opsize < 2 || opsize > length)
return NULL;
if (opcode == TCPOPT_MD5SIG)
return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
return -EINVAL;
if (opcode == TCPOPT_MD5SIG) {
if (opsize != TCPOLEN_MD5SIG)
return -EINVAL;
if (unlikely(*md5_hash || *ao_hash))
return -EEXIST;
*md5_hash = ptr;
} else if (opcode == TCPOPT_AO) {
if (opsize <= sizeof(struct tcp_ao_hdr))
return -EINVAL;
if (unlikely(*md5_hash || *ao_hash))
return -EEXIST;
*ao_hash = ptr;
}
}
ptr += opsize - 2;
length -= opsize;
}
return NULL;
return 0;
}
EXPORT_SYMBOL(tcp_parse_md5sig_option);
EXPORT_SYMBOL(tcp_do_parse_auth_options);
#endif
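
For reference, the TCP-AO option this parser walks has the RFC 5925
layout sketched below; the field names are illustrative, not the
kernel's (the kernel's struct tcp_ao_hdr covers the first four bytes).
It also shows why opsize <= sizeof(struct tcp_ao_hdr) is rejected
above: a valid option carries at least one MAC byte.

#include <stdint.h>

struct tcpao_option {		/* sketch of the wire format */
	uint8_t kind;		/* TCPOPT_AO == 29 */
	uint8_t length;		/* 4 + MAC length, hence must be > 4 */
	uint8_t keyid;		/* SendID of the MKT that signed the segment */
	uint8_t rnext_keyid;	/* RecvID the sender requests for replies */
	uint8_t mac[];		/* length - 4 bytes of authentication code */
};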
/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
......@@ -6151,6 +6202,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_ao_finish_connect(sk, skb);
tcp_set_state(sk, TCP_ESTABLISHED);
icsk->icsk_ack.lrcvtime = tcp_jiffies32;
......@@ -6436,6 +6488,16 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* simultaneous connect with crossed SYNs.
* Particularly, it can be connect to self.
*/
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
ao = rcu_dereference_protected(tp->ao_info,
lockdep_sock_is_held(sk));
if (ao) {
WRITE_ONCE(ao->risn, th->seq);
ao->rcv_sne = 0;
}
#endif
tcp_set_state(sk, TCP_SYN_RECV);
if (tp->rx_opt.saw_tstamp) {
......@@ -6648,6 +6710,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
skb);
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
}
tcp_ao_established(sk);
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
......@@ -7024,6 +7087,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct flowi fl;
u8 syncookies;
#ifdef CONFIG_TCP_AO
const struct tcp_ao_hdr *aoh;
#endif
syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
/* TW buckets are converted to open requests without
......@@ -7110,6 +7177,17 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
inet_rsk(req)->ecn_ok = 0;
}
#ifdef CONFIG_TCP_AO
if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
goto drop_and_release; /* Invalid TCP options */
if (aoh) {
tcp_rsk(req)->maclen = aoh->length - sizeof(struct tcp_ao_hdr);
tcp_rsk(req)->ao_rcv_next = aoh->keyid;
tcp_rsk(req)->ao_keyid = aoh->rnext_keyid;
} else {
tcp_rsk(req)->maclen = 0;
}
#endif
tcp_rsk(req)->snt_isn = isn;
tcp_rsk(req)->txhash = net_tx_rndhash();
tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
......
This diff is collapsed.
......@@ -51,6 +51,18 @@ tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
return TCP_TW_SUCCESS;
}
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
struct tcp_ao_info *ao;
ao = rcu_dereference(tcptw->ao_info);
if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
tcptw->tw_rcv_nxt = seq;
}
/*
* * Main purpose of TIME-WAIT state is to close connection gracefully,
* when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
......@@ -136,7 +148,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
/* FIN arrived, enter true time-wait state. */
tw->tw_substate = TCP_TIME_WAIT;
tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent_stamp = ktime_get_seconds();
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
......@@ -261,10 +274,9 @@ static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
if (!tcptw->tw_md5_key)
return;
if (!tcp_alloc_md5sig_pool())
goto out_free;
if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
goto out_free;
tcp_md5_add_sigpool();
}
return;
out_free:
......@@ -280,7 +292,7 @@ static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct inet_timewait_sock *tw;
......@@ -317,6 +329,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
#endif
tcp_time_wait_init(sk, tcptw);
tcp_ao_time_wait(tcptw, tp);
/* Get the TIME_WAIT timeout firing. */
if (timeo < rto)
......@@ -349,18 +362,29 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
}
EXPORT_SYMBOL(tcp_time_wait);
#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
struct tcp_md5sig_key *key;
key = container_of(head, struct tcp_md5sig_key, rcu);
kfree(key);
static_branch_slow_dec_deferred(&tcp_md5_needed);
tcp_md5_release_sigpool();
}
#endif
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
if (static_branch_unlikely(&tcp_md5_needed.key)) {
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
if (twsk->tw_md5_key) {
kfree_rcu(twsk->tw_md5_key, rcu);
static_branch_slow_dec_deferred(&tcp_md5_needed);
}
if (twsk->tw_md5_key)
call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
}
#endif
tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
......@@ -495,6 +519,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
const struct tcp_sock *oldtp;
struct tcp_sock *newtp;
u32 seq;
#ifdef CONFIG_TCP_AO
struct tcp_ao_key *ao_key;
#endif
if (!newsk)
return NULL;
......@@ -583,6 +610,13 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
#endif
#ifdef CONFIG_TCP_AO
newtp->ao_info = NULL;
ao_key = treq->af_specific->ao_lookup(sk, req,
tcp_rsk(req)->ao_keyid, -1);
if (ao_key)
newtp->tcp_header_len += tcp_ao_len(ao_key);
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
......
This diff is collapsed.
This diff is collapsed.
......@@ -52,4 +52,5 @@ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
ifneq ($(CONFIG_IPV6),)
obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o
obj-y += mcast_snoop.o
obj-$(CONFIG_TCP_AO) += tcp_ao.o
endif
......@@ -140,6 +140,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst;
__u8 rcv_wscale;
u32 tsoff = 0;
int l3index;
if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
!th->ack || th->rst)
......@@ -214,6 +215,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
treq->snt_isn = cookie;
treq->ts_off = 0;
treq->txhash = net_tx_rndhash();
l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
tcp_ao_syncookie(sk, skb, treq, AF_INET6, l3index);
if (IS_ENABLED(CONFIG_SMC))
ireq->smc_ok = 0;
......
This diff is collapsed.
This diff is collapsed.