Commit cfb6eeb4 authored by YOSHIFUJI Hideaki's avatar YOSHIFUJI Hideaki Committed by David S. Miller

[TCP]: MD5 Signature Option (RFC2385) support.

Based on implementation by Rick Payne.
Signed-off-by: default avatarYOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent bf6bce71
...@@ -2598,6 +2598,9 @@ S: Ucitelska 1576 ...@@ -2598,6 +2598,9 @@ S: Ucitelska 1576
S: Prague 8 S: Prague 8
S: 182 00 Czech Republic S: 182 00 Czech Republic
N: Rick Payne
D: RFC2385 Support for TCP
N: Barak A. Pearlmutter N: Barak A. Pearlmutter
E: bap@cs.unm.edu E: bap@cs.unm.edu
W: http://www.cs.unm.edu/~bap/ W: http://www.cs.unm.edu/~bap/
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <linux/socket.h>
struct tcphdr { struct tcphdr {
__be16 source; __be16 source;
...@@ -94,6 +95,7 @@ enum { ...@@ -94,6 +95,7 @@ enum {
#define TCP_INFO 11 /* Information about this connection. */ #define TCP_INFO 11 /* Information about this connection. */
#define TCP_QUICKACK 12 /* Block/reenable quick acks */ #define TCP_QUICKACK 12 /* Block/reenable quick acks */
#define TCP_CONGESTION 13 /* Congestion control algorithm */ #define TCP_CONGESTION 13 /* Congestion control algorithm */
#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
#define TCPI_OPT_TIMESTAMPS 1 #define TCPI_OPT_TIMESTAMPS 1
#define TCPI_OPT_SACK 2 #define TCPI_OPT_SACK 2
...@@ -157,6 +159,17 @@ struct tcp_info ...@@ -157,6 +159,17 @@ struct tcp_info
__u32 tcpi_total_retrans; __u32 tcpi_total_retrans;
}; };
/* for TCP_MD5SIG socket option */
#define TCP_MD5SIG_MAXKEYLEN	80

/*
 * Userspace request for the TCP_MD5SIG setsockopt: binds an MD5 key
 * (RFC 2385) to a peer address.  Layout is userspace ABI — do not
 * reorder or resize fields.  The __tcpm_pad fields must be zero.
 */
struct tcp_md5sig {
	struct __kernel_sockaddr_storage tcpm_addr;	/* address associated */
	__u16	__tcpm_pad1;				/* zero */
	__u16	tcpm_keylen;				/* key length */
	__u32	__tcpm_pad2;				/* zero */
	__u8	tcpm_key[TCP_MD5SIG_MAXKEYLEN];		/* key (binary) */
};
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/skbuff.h> #include <linux/skbuff.h>
...@@ -198,6 +211,10 @@ struct tcp_options_received { ...@@ -198,6 +211,10 @@ struct tcp_options_received {
struct tcp_request_sock { struct tcp_request_sock {
struct inet_request_sock req; struct inet_request_sock req;
#ifdef CONFIG_TCP_MD5SIG
/* Only used by TCP MD5 Signature so far. */
struct tcp_request_sock_ops *af_specific;
#endif
__u32 rcv_isn; __u32 rcv_isn;
__u32 snt_isn; __u32 snt_isn;
}; };
...@@ -363,6 +380,14 @@ struct tcp_sock { ...@@ -363,6 +380,14 @@ struct tcp_sock {
__u32 probe_seq_start; __u32 probe_seq_start;
__u32 probe_seq_end; __u32 probe_seq_end;
} mtu_probe; } mtu_probe;
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
struct tcp_sock_af_ops *af_specific;
	/* TCP MD5 Signature Option information */
struct tcp_md5sig_info *md5sig_info;
#endif
}; };
static inline struct tcp_sock *tcp_sk(const struct sock *sk) static inline struct tcp_sock *tcp_sk(const struct sock *sk)
...@@ -377,6 +402,10 @@ struct tcp_timewait_sock { ...@@ -377,6 +402,10 @@ struct tcp_timewait_sock {
__u32 tw_rcv_wnd; __u32 tw_rcv_wnd;
__u32 tw_ts_recent; __u32 tw_ts_recent;
long tw_ts_recent_stamp; long tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
__u16 tw_md5_keylen;
__u8 tw_md5_key[TCP_MD5SIG_MAXKEYLEN];
#endif
}; };
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
......
...@@ -35,7 +35,8 @@ struct request_sock_ops { ...@@ -35,7 +35,8 @@ struct request_sock_ops {
struct dst_entry *dst); struct dst_entry *dst);
void (*send_ack)(struct sk_buff *skb, void (*send_ack)(struct sk_buff *skb,
struct request_sock *req); struct request_sock *req);
void (*send_reset)(struct sk_buff *skb); void (*send_reset)(struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req); void (*destructor)(struct request_sock *req);
}; };
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <net/inet_connection_sock.h> #include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h> #include <net/inet_timewait_sock.h>
...@@ -161,6 +162,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); ...@@ -161,6 +162,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK_PERM 4 /* SACK Permitted */ #define TCPOPT_SACK_PERM 4 /* SACK Permitted */
#define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
/* /*
* TCP option lengths * TCP option lengths
...@@ -170,6 +172,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); ...@@ -170,6 +172,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_WINDOW 3 #define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2 #define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10 #define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
/* But this is what stacks really send out. */ /* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12 #define TCPOLEN_TSTAMP_ALIGNED 12
...@@ -178,6 +181,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); ...@@ -178,6 +181,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_BASE 2 #define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4 #define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8 #define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
/* Flags in tp->nonagle */ /* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
...@@ -299,6 +303,8 @@ extern void tcp_cleanup_rbuf(struct sock *sk, int copied); ...@@ -299,6 +303,8 @@ extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, extern int tcp_twsk_unique(struct sock *sk,
struct sock *sktw, void *twp); struct sock *sktw, void *twp);
extern void tcp_twsk_destructor(struct sock *sk);
static inline void tcp_dec_quickack_mode(struct sock *sk, static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts) const unsigned int pkts)
{ {
...@@ -1064,6 +1070,114 @@ static inline void clear_all_retrans_hints(struct tcp_sock *tp){ ...@@ -1064,6 +1070,114 @@ static inline void clear_all_retrans_hints(struct tcp_sock *tp){
tp->fastpath_skb_hint = NULL; tp->fastpath_skb_hint = NULL;
} }
/* MD5 Signature */
struct crypto_hash;

/* - key database */
/* Generic view of a key: binary blob + length. */
struct tcp_md5sig_key {
	u8	*key;
	u8	keylen;
};

/* IPv4 peer -> key mapping entry. */
struct tcp4_md5sig_key {
	u8	*key;
	u16	keylen;
	__be32	addr;
};

/* IPv6 peer -> key mapping entry. */
struct tcp6_md5sig_key {
	u8	*key;
	u16	keylen;
#if 0
	u32	scope_id;	/* XXX */
#endif
	struct in6_addr	addr;
};

/* - sock block */
/*
 * Per-socket MD5 key database: dynamically grown arrays of per-address
 * keys (entries* = used, alloced* = capacity).
 */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
/* Pseudo-headers hashed into the MD5 digest per RFC 2385. */
struct tcp4_pseudohdr {
	__be32	saddr;
	__be32	daddr;
	__u8	pad;
	__u8	protocol;
	__be16	len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
/* One of these exists per possible CPU; see tcp_alloc_md5sig_pool(). */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */
/* - functions */
extern int tcp_v4_calc_md5_hash(char *md5_hash,
struct tcp_md5sig_key *key,
struct sock *sk,
struct dst_entry *dst,
struct request_sock *req,
struct tcphdr *th,
int protocol, int tcplen);
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk,
__be32 addr,
u8 *newkey,
u8 newkeylen);
extern int tcp_v4_md5_do_del(struct sock *sk,
u32 addr);
extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void);
extern void tcp_free_md5sig_pool(void);
extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
int cpu = get_cpu();
struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
if (!ret)
put_cpu();
return ret;
}
/*
 * Release the pool reference taken by tcp_get_md5sig_pool() and then
 * unpin the CPU (undoes the get_cpu() performed there).  Order matters:
 * drop the pool reference before re-enabling preemption.
 */
static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
/* /proc */ /* /proc */
enum tcp_seq_states { enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING, TCP_SEQ_STATE_LISTENING,
...@@ -1103,6 +1217,35 @@ extern int tcp4_proc_init(void); ...@@ -1103,6 +1217,35 @@ extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void); extern void tcp4_proc_exit(void);
#endif #endif
/* TCP af-specific functions */
/*
 * Address-family specific hooks hung off tcp_sock; currently only used
 * by the MD5 signature option so the IPv4/IPv6 paths can share code.
 */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	/* Find the MD5 key (if any) matching addr_sk's peer address. */
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	/* Compute the RFC 2385 digest into *location for this segment. */
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct dst_entry *dst,
						  struct request_sock *req,
						  struct tcphdr *th,
						  int protocol, int len);
	/* Install newkey for addr_sk's peer; takes ownership of newkey. */
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	/* Parse the TCP_MD5SIG setsockopt payload from userspace. */
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

/* Per-request-socket AF hooks; MD5 key lookup for SYN-ACKs. */
struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
#endif
};
extern void tcp_v4_init(struct net_proto_family *ops); extern void tcp_v4_init(struct net_proto_family *ops);
extern void tcp_init(void); extern void tcp_init(void);
......
...@@ -31,6 +31,9 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) ...@@ -31,6 +31,9 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
static inline void twsk_destructor(struct sock *sk) static inline void twsk_destructor(struct sock *sk)
{ {
BUG_ON(sk == NULL);
BUG_ON(sk->sk_prot == NULL);
BUG_ON(sk->sk_prot->twsk_prot == NULL);
if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
sk->sk_prot->twsk_prot->twsk_destructor(sk); sk->sk_prot->twsk_prot->twsk_destructor(sk);
} }
......
...@@ -509,7 +509,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, ...@@ -509,7 +509,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
return err; return err;
} }
static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb) static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{ {
int err; int err;
struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
...@@ -724,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -724,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0; return 0;
reset: reset:
dccp_v4_ctl_send_reset(skb); dccp_v4_ctl_send_reset(sk, skb);
discard: discard:
kfree_skb(skb); kfree_skb(skb);
return 0; return 0;
...@@ -913,7 +913,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) ...@@ -913,7 +913,7 @@ static int dccp_v4_rcv(struct sk_buff *skb)
if (dh->dccph_type != DCCP_PKT_RESET) { if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION; DCCP_RESET_CODE_NO_CONNECTION;
dccp_v4_ctl_send_reset(skb); dccp_v4_ctl_send_reset(sk, skb);
} }
discard_it: discard_it:
......
...@@ -310,7 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req) ...@@ -310,7 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
kfree_skb(inet6_rsk(req)->pktopts); kfree_skb(inet6_rsk(req)->pktopts);
} }
static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb) static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{ {
struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
...@@ -805,7 +805,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -805,7 +805,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0; return 0;
reset: reset:
dccp_v6_ctl_send_reset(skb); dccp_v6_ctl_send_reset(sk, skb);
discard: discard:
if (opt_skb != NULL) if (opt_skb != NULL)
__kfree_skb(opt_skb); __kfree_skb(opt_skb);
...@@ -902,7 +902,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb) ...@@ -902,7 +902,7 @@ static int dccp_v6_rcv(struct sk_buff **pskb)
if (dh->dccph_type != DCCP_PKT_RESET) { if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION; DCCP_RESET_CODE_NO_CONNECTION;
dccp_v6_ctl_send_reset(skb); dccp_v6_ctl_send_reset(sk, skb);
} }
discard_it: discard_it:
......
...@@ -246,7 +246,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, ...@@ -246,7 +246,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop: drop:
if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
req->rsk_ops->send_reset(skb); req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev); inet_csk_reqsk_queue_drop(sk, req, prev);
goto out; goto out;
......
...@@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG ...@@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG
default "reno" if DEFAULT_RENO default "reno" if DEFAULT_RENO
default "cubic" default "cubic"
config TCP_MD5SIG
bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
depends on EXPERIMENTAL
select CRYPTO
select CRYPTO_MD5
---help---
	  RFC2385 specifies a method of giving MD5 protection to TCP sessions.
Its main (only?) use is to protect BGP sessions between core routers
on the Internet.
If unsure, say N.
config TCP_MD5SIG_DEBUG
bool "TCP: MD5 Signature Option debugging"
depends on TCP_MD5SIG
source "net/ipv4/ipvs/Kconfig" source "net/ipv4/ipvs/Kconfig"
...@@ -258,6 +258,7 @@ ...@@ -258,6 +258,7 @@
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/crypto.h>
#include <net/icmp.h> #include <net/icmp.h>
#include <net/tcp.h> #include <net/tcp.h>
...@@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level, ...@@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
} }
break; break;
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
/* Read the IP->Key mappings from userspace */
err = tp->af_specific->md5_parse(sk, optval, optlen);
break;
#endif
default: default:
err = -ENOPROTOOPT; err = -ENOPROTOOPT;
break; break;
...@@ -2231,6 +2239,135 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) ...@@ -2231,6 +2239,135 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
} }
EXPORT_SYMBOL(tcp_tso_segment); EXPORT_SYMBOL(tcp_tso_segment);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
/*
 * Free every per-cpu tcp_md5sig_pool entry (crypto transform + scratch
 * buffer) and then the percpu pointer array itself.
 *
 * Caller must guarantee nobody can still reach @pool: it is called only
 * after the global user count has dropped to zero, or on an allocation
 * error path before the pool was ever published.
 *
 * Fix vs. original: dropped the dead "p = NULL;" store — p is a
 * loop-local copy that goes out of scope immediately, so the assignment
 * had no effect.
 */
static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
		}
	}
	free_percpu(pool);
}
/*
 * Drop one reference on the global MD5 signature pool.  When the last
 * user goes away, unpublish the pool under tcp_md5sig_pool_lock and
 * free it outside the lock (freeing may not be done while holding a
 * spinlock since crypto_free_hash can be heavyweight).
 */
void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		/* Last user: take ownership of the pool for freeing. */
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);
/*
 * Allocate the percpu MD5 signature pool: one zeroed tcp_md5sig_pool
 * plus one "md5" hash transform per possible CPU.  Returns NULL on any
 * failure, after releasing everything allocated so far.
 */
struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int cpu;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (pool == NULL)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct crypto_hash *hash;
		struct tcp_md5sig_pool *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (p == NULL)
			goto out_free;
		/* Publish into the percpu slot first so the error path
		 * (__tcp_free_md5sig_pool) can find and free it. */
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (hash == NULL || IS_ERR(hash))
			goto out_free;
		p->md5_desc.tfm = hash;
	}
	return pool;

out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}
/*
 * Take a reference on the global MD5 signature pool, allocating it on
 * first use.  The allocation itself may sleep, so it is done outside
 * tcp_md5sig_pool_lock; the users counter is bumped first so that a
 * concurrent caller seeing users > 0 but pool == NULL knows an
 * allocation is in flight and spins (cpu_relax/retry) until the pool
 * is published or the allocation fails.  Returns NULL on allocation
 * failure (reference already dropped), otherwise the live pool.
 */
struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		/* First user: we are responsible for allocating. */
		alloc = 1;
		spin_unlock(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		/* Someone else is allocating right now: back off, undo
		 * our count bump, and retry until the pool appears. */
		tcp_md5sig_users--;
		spin_unlock(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
/*
 * Take a reference on the global pool and return the entry belonging
 * to @cpu, or NULL (no reference taken) if the pool does not exist.
 * Caller must already have pinned @cpu (see tcp_get_md5sig_pool()).
 */
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **pool;

	spin_lock(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (pool != NULL)
		tcp_md5sig_users++;
	spin_unlock(&tcp_md5sig_pool_lock);

	if (pool == NULL)
		return NULL;
	return *per_cpu_ptr(pool, cpu);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);
/*
 * Drop the reference taken by __tcp_get_md5sig_pool().
 *
 * Fix: this must go through tcp_free_md5sig_pool() so the users count
 * is decremented under tcp_md5sig_pool_lock and the pool is only torn
 * down when the last reference goes away.  The previous code called
 * __tcp_free_md5sig_pool(tcp_md5sig_pool) directly, destroying the
 * shared pool (without locking or unpublishing it) while other holders
 * of references could still be using it — a use-after-free.
 */
void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif
extern void __skb_cb_too_small_for_tcp(int, int); extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno; extern struct tcp_congestion_ops tcp_reno;
......
...@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, ...@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
opt_rx->sack_ok) { opt_rx->sack_ok) {
TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
} }
#ifdef CONFIG_TCP_MD5SIG
case TCPOPT_MD5SIG:
/*
* The MD5 Hash has already been
* checked (see tcp_v{4,6}_do_rcv()).
*/
break;
#endif
}; };
ptr+=opsize-2; ptr+=opsize-2;
length-=opsize; length-=opsize;
......
This diff is collapsed.
...@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_ipv6only = np->ipv6only; tw->tw_ipv6only = np->ipv6only;
} }
#endif #endif
#ifdef CONFIG_TCP_MD5SIG
/*
* The timewait bucket does not have the key DB from the
* sock structure. We just make a quick copy of the
* md5 key being used (if indeed we are using one)
* so the timewait ack generating code has the key.
*/
do {
struct tcp_md5sig_key *key;
memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
tcptw->tw_md5_keylen = 0;
key = tp->af_specific->md5_lookup(sk, sk);
if (key != NULL) {
memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
tcptw->tw_md5_keylen = key->keylen;
if (tcp_alloc_md5sig_pool() == NULL)
BUG();
}
} while(0);
#endif
/* Linkage updates. */ /* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo); __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
...@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcp_done(sk); tcp_done(sk);
} }
/*
 * Destructor for TCP timewait sockets: if the socket carried an MD5 key
 * (tw_md5_keylen set in tcp_time_wait()), drop the MD5 pool reference
 * that was taken there.
 *
 * Fix: the twsk local is now declared inside the #ifdef — previously it
 * was declared and initialized unconditionally, producing an "unused
 * variable" compiler warning when CONFIG_TCP_MD5SIG is not set.
 */
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_keylen)
		tcp_put_md5sig_pool();
#endif
}

EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
/* This is not only more efficient than what we used to do, it eliminates /* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
* *
...@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, ...@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.ts_recent_stamp = 0; newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr); newtp->tcp_header_len = sizeof(struct tcphdr);
} }
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
if (newtp->af_specific->md5_lookup(sk, newsk))
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss; newtp->rx_opt.mss_clamp = req->mss;
...@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
req, NULL); req, NULL);
if (child == NULL) if (child == NULL)
goto listen_overflow; goto listen_overflow;
#ifdef CONFIG_TCP_MD5SIG
else {
/* Copy over the MD5 key from the original socket */
struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
key = tp->af_specific->md5_lookup(sk, child);
if (key != NULL) {
/*
* We're using one, so create a matching key on the
* newsk structure. If we fail to get memory then we
* end up not copying the key across. Shucks.
*/
char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
if (newkey) {
if (!tcp_alloc_md5sig_pool())
BUG();
memcpy(newkey, key->key, key->keylen);
tp->af_specific->md5_add(child, child,
newkey,
key->keylen);
}
}
}
#endif
inet_csk_reqsk_queue_unlink(sk, req, prev); inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req); inet_csk_reqsk_queue_removed(sk, req);
...@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, ...@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
embryonic_reset: embryonic_reset:
NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST)) if (!(flg & TCP_FLAG_RST))
req->rsk_ops->send_reset(skb); req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev); inet_csk_reqsk_queue_drop(sk, req, prev);
return NULL; return NULL;
......
...@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk) ...@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk)
} }
static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
__u32 tstamp) __u32 tstamp, __u8 **md5_hash)
{ {
if (tp->rx_opt.tstamp_ok) { if (tp->rx_opt.tstamp_ok) {
*ptr++ = htonl((TCPOPT_NOP << 24) | *ptr++ = htonl((TCPOPT_NOP << 24) |
...@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, ...@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
tp->rx_opt.eff_sacks--; tp->rx_opt.eff_sacks--;
} }
} }
#ifdef CONFIG_TCP_MD5SIG
if (md5_hash) {
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) |
TCPOLEN_MD5SIG);
*md5_hash = (__u8 *)ptr;
}
#endif
} }
/* Construct a tcp options header for a SYN or SYN_ACK packet. /* Construct a tcp options header for a SYN or SYN_ACK packet.
* If this is every changed make sure to change the definition of * If this is every changed make sure to change the definition of
* MAX_SYN_SIZE to match the new maximum number of options that you * MAX_SYN_SIZE to match the new maximum number of options that you
* can generate. * can generate.
*
* Note - that with the RFC2385 TCP option, we make room for the
* 16 byte MD5 hash. This will be filled in later, so the pointer for the
* location to be filled is passed back up.
*/ */
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
int offer_wscale, int wscale, __u32 tstamp, int offer_wscale, int wscale, __u32 tstamp,
__u32 ts_recent) __u32 ts_recent, __u8 **md5_hash)
{ {
/* We always get an MSS option. /* We always get an MSS option.
* The option bytes which will be seen in normal data * The option bytes which will be seen in normal data
...@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, ...@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
(TCPOPT_WINDOW << 16) | (TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) | (TCPOLEN_WINDOW << 8) |
(wscale)); (wscale));
#ifdef CONFIG_TCP_MD5SIG
/*
* If MD5 is enabled, then we set the option, and include the size
* (always 18). The actual MD5 hash is added just before the
* packet is sent.
*/
if (md5_hash) {
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) |
TCPOLEN_MD5SIG);
*md5_hash = (__u8 *) ptr;
}
#endif
} }
/* This routine actually transmits TCP packets queued in by /* This routine actually transmits TCP packets queued in by
...@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, ...@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
struct tcp_sock *tp; struct tcp_sock *tp;
struct tcp_skb_cb *tcb; struct tcp_skb_cb *tcb;
int tcp_header_size; int tcp_header_size;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *md5;
__u8 *md5_hash_location;
#endif
struct tcphdr *th; struct tcphdr *th;
int sysctl_flags; int sysctl_flags;
int err; int err;
...@@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, ...@@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (tcp_packets_in_flight(tp) == 0) if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START); tcp_ca_event(sk, CA_EVENT_TX_START);
#ifdef CONFIG_TCP_MD5SIG
/*
* Are we doing MD5 on this segment? If so - make
* room for it.
*/
md5 = tp->af_specific->md5_lookup(sk, sk);
if (md5)
tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif
th = (struct tcphdr *) skb_push(skb, tcp_header_size); th = (struct tcphdr *) skb_push(skb, tcp_header_size);
skb->h.th = th; skb->h.th = th;
...@@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, ...@@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
(sysctl_flags & SYSCTL_FLAG_WSCALE), (sysctl_flags & SYSCTL_FLAG_WSCALE),
tp->rx_opt.rcv_wscale, tp->rx_opt.rcv_wscale,
tcb->when, tcb->when,
tp->rx_opt.ts_recent); tp->rx_opt.ts_recent,
#ifdef CONFIG_TCP_MD5SIG
md5 ? &md5_hash_location :
#endif
NULL);
} else { } else {
tcp_build_and_update_options((__be32 *)(th + 1), tcp_build_and_update_options((__be32 *)(th + 1),
tp, tcb->when); tp, tcb->when,
#ifdef CONFIG_TCP_MD5SIG
md5 ? &md5_hash_location :
#endif
NULL);
TCP_ECN_send(sk, tp, skb, tcp_header_size); TCP_ECN_send(sk, tp, skb, tcp_header_size);
} }
#ifdef CONFIG_TCP_MD5SIG
/* Calculate the MD5 hash, as we have all we need now */
if (md5) {
tp->af_specific->calc_md5_hash(md5_hash_location,
md5,
sk, NULL, NULL,
skb->h.th,
sk->sk_protocol,
skb->len);
}
#endif
icsk->icsk_af_ops->send_check(sk, skb->len, skb); icsk->icsk_af_ops->send_check(sk, skb->len, skb);
if (likely(tcb->flags & TCPCB_FLAG_ACK)) if (likely(tcb->flags & TCPCB_FLAG_ACK))
...@@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) ...@@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
(tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk))
mss_now -= TCPOLEN_MD5SIG_ALIGNED;
#endif
xmit_size_goal = mss_now; xmit_size_goal = mss_now;
if (doing_tso) { if (doing_tso) {
...@@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcphdr *th; struct tcphdr *th;
int tcp_header_size; int tcp_header_size;
struct sk_buff *skb; struct sk_buff *skb;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *md5;
__u8 *md5_hash_location;
#endif
skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
if (skb == NULL) if (skb == NULL)
...@@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
(ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
/* SACK_PERM is in the place of NOP NOP of TS */ /* SACK_PERM is in the place of NOP NOP of TS */
((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
#ifdef CONFIG_TCP_MD5SIG
/* Are we doing MD5 on this segment? If so - make room for it */
md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
if (md5)
tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif
skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
memset(th, 0, sizeof(struct tcphdr)); memset(th, 0, sizeof(struct tcphdr));
...@@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, ...@@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
TCP_SKB_CB(skb)->when, TCP_SKB_CB(skb)->when,
req->ts_recent); req->ts_recent,
(
#ifdef CONFIG_TCP_MD5SIG
md5 ? &md5_hash_location :
#endif
NULL)
);
skb->csum = 0; skb->csum = 0;
th->doff = (tcp_header_size >> 2); th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(TCP_MIB_OUTSEGS); TCP_INC_STATS(TCP_MIB_OUTSEGS);
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
if (md5) {
tp->af_specific->calc_md5_hash(md5_hash_location,
md5,
NULL, dst, req,
skb->h.th, sk->sk_protocol,
skb->len);
}
#endif
return skb; return skb;
} }
...@@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk) ...@@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk)
tp->tcp_header_len = sizeof(struct tcphdr) + tp->tcp_header_len = sizeof(struct tcphdr) +
(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk) != NULL)
tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
/* If user gave his TCP_MAXSEG, record it to clamp */ /* If user gave his TCP_MAXSEG, record it to clamp */
if (tp->rx_opt.user_mss) if (tp->rx_opt.user_mss)
tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment