Commit 643b622b authored by Menglong Dong, committed by David S. Miller

net: tcp: add skb drop reasons to tcp_v{4,6}_inbound_md5_hash()

Pass the address of a drop reason to tcp_v4_inbound_md5_hash() and
tcp_v6_inbound_md5_hash() so that they can record why an skb failed
MD5 validation. The caller can then hand that reason to
kfree_skb_reason() when the skb is freed.

The following drop reasons are added:

SKB_DROP_REASON_TCP_MD5NOTFOUND
SKB_DROP_REASON_TCP_MD5UNEXPECTED
SKB_DROP_REASON_TCP_MD5FAILURE

The SKB_DROP_REASON_TCP_MD5* reasons above correspond to the
LINUX_MIB_TCPMD5* SNMP counters.

Reviewed-by: Mengen Sun <mengensun@tencent.com>
Reviewed-by: Hao Peng <flyingpeng@tencent.com>
Signed-off-by: Menglong Dong <imagedong@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c0e3154d
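
Before the diff itself, here is a condensed sketch of the pattern this
patch establishes in the receive path (hypothetical function name
rcv_sketch; not verbatim kernel code): the MD5 check records why it
failed through an out-parameter, and the discard path forwards that
reason to kfree_skb_reason(), which makes it visible through the
skb:kfree_skb tracepoint.

/* Condensed, hedged sketch of the caller pattern (not the real
 * tcp_v4_rcv(); rcv_sketch is a made-up name for illustration).
 */
static int rcv_sketch(struct sock *sk, struct sk_buff *skb,
		      int dif, int sdif)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	/* On failure, the helper fills in drop_reason before returning. */
	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
		goto discard_it;

	return 0;	/* continue normal receive processing */

discard_it:
	/* The recorded reason reaches the skb:kfree_skb tracepoint here. */
	kfree_skb_reason(skb, drop_reason);
	return 0;
}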
@@ -346,6 +346,18 @@ enum skb_drop_reason {
 					 * udp packet drop out of
 					 * udp_memory_allocated.
 					 */
+	SKB_DROP_REASON_TCP_MD5NOTFOUND,	/* no MD5 hash and one
+						 * expected, corresponding
+						 * to LINUX_MIB_TCPMD5NOTFOUND
+						 */
+	SKB_DROP_REASON_TCP_MD5UNEXPECTED,	/* MD5 hash and we're not
+						 * expecting one, corresponding
+						 * to LINUX_MIB_TCPMD5UNEXPECTED
+						 */
+	SKB_DROP_REASON_TCP_MD5FAILURE,		/* MD5 hash and it's wrong,
+						 * corresponding to
+						 * LINUX_MIB_TCPMD5FAILURE
+						 */
 	SKB_DROP_REASON_MAX,
 };
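
Note that the new values sit before SKB_DROP_REASON_MAX, so existing
values keep their numbering. For reference, at this point in the tree
kfree_skb() itself is a thin wrapper over kfree_skb_reason(); this is
why call sites that know the real cause are worth converting. A sketch
paraphrased from the skb header of this era (exact body may differ):

/* Sketch: the un-annotated free reports NOT_SPECIFIED, so every
 * converted call site improves drop observability for free.
 */
static inline void kfree_skb(struct sk_buff *skb)
{
	kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}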
@@ -27,6 +27,10 @@
 	EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO)		\
 	EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF)	\
 	EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM)		\
+	EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND)	\
+	EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED,			\
+	   TCP_MD5UNEXPECTED)					\
+	EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE)	\
 	EMe(SKB_DROP_REASON_MAX, MAX)

 #undef EM
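
Each new reason needs an EM() entry here because the same list is
expanded twice by the trace header. A sketch of the two-pass expansion,
reconstructed from common tracepoint-header conventions (the exact
macro bodies in the tree may differ slightly): pass 1 registers each
enum value with the tracing core, pass 2 builds the name table that
__print_symbolic() uses to render the reason field of skb:kfree_skb
events as text.

#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

TRACE_SKB_DROP_REASON	/* pass 1: export enum values to tracing */

#undef EM
#undef EMe
#define EM(a, b)	{ a, #b },
#define EMe(a, b)	{ a, #b }

/* pass 2 is then consumed inside TP_printk() roughly as:
 *   __print_symbolic(__entry->reason, TRACE_SKB_DROP_REASON)
 */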
@@ -1412,7 +1412,8 @@ EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 /* Called with rcu_read_lock() */
 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 				    const struct sk_buff *skb,
-				    int dif, int sdif)
+				    int dif, int sdif,
+				    enum skb_drop_reason *reason)
 {
 #ifdef CONFIG_TCP_MD5SIG
 	/*
@@ -1445,11 +1446,13 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 		return false;

 	if (hash_expected && !hash_location) {
+		*reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}

 	if (!hash_expected && hash_location) {
+		*reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
@@ -1462,6 +1465,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 					      NULL, skb);

 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		*reason = SKB_DROP_REASON_TCP_MD5FAILURE;
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
 				     &iph->saddr, ntohs(th->source),
@@ -1971,13 +1975,13 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
 int tcp_v4_rcv(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
+	enum skb_drop_reason drop_reason;
 	int sdif = inet_sdif(skb);
 	int dif = inet_iif(skb);
 	const struct iphdr *iph;
 	const struct tcphdr *th;
 	bool refcounted;
 	struct sock *sk;
-	int drop_reason;
 	int ret;

 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
@@ -2025,7 +2029,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		struct sock *nsk;

 		sk = req->rsk_listener;
-		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
+		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif,
+						     &drop_reason))) {
 			sk_drops_add(sk, skb);
 			reqsk_put(req);
 			goto discard_it;
@@ -2099,7 +2104,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		goto discard_and_relse;
 	}

-	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
+	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
 		goto discard_and_relse;

 	nf_reset_ct(skb);
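
Both call sites above funnel into labels that this diff does not show.
A condensed sketch of where the recorded reason is finally consumed in
tcp_v4_rcv() (the real function reaches discard_it via a goto from
discard_and_relse; layout simplified here):

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
discard_it:
	/* Discard frame, reporting the reason set earlier. */
	kfree_skb_reason(skb, drop_reason);
	return 0;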
@@ -775,7 +775,8 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,

 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 				    const struct sk_buff *skb,
-				    int dif, int sdif)
+				    int dif, int sdif,
+				    enum skb_drop_reason *reason)
 {
 #ifdef CONFIG_TCP_MD5SIG
 	const __u8 *hash_location = NULL;
@@ -798,11 +799,13 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 		return false;

 	if (hash_expected && !hash_location) {
+		*reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}

 	if (!hash_expected && hash_location) {
+		*reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
@@ -813,6 +816,7 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 				      NULL, skb);

 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		*reason = SKB_DROP_REASON_TCP_MD5FAILURE;
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
 				     genhash ? "failed" : "mismatch",
@@ -1681,7 +1685,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 		struct sock *nsk;

 		sk = req->rsk_listener;
-		if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
+		if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif,
+					    &drop_reason)) {
 			sk_drops_add(sk, skb);
 			reqsk_put(req);
 			goto discard_it;
@@ -1752,7 +1757,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 		goto discard_and_relse;
 	}

-	if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
+	if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
 		goto discard_and_relse;

 	if (tcp_filter(sk, skb)) {
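
With the reasons wired up, MD5 drops can be watched from userspace via
the skb:kfree_skb tracepoint. A minimal sketch of a tracefs reader,
assuming tracefs is mounted at /sys/kernel/tracing (older systems use
/sys/kernel/debug/tracing), root privileges, and a kernel that already
carries the reason field in this tracepoint (added earlier in this
series); error handling is deliberately short:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Enable the skb:kfree_skb trace event. */
	int fd = open("/sys/kernel/tracing/events/skb/kfree_skb/enable",
		      O_WRONLY);
	char buf[4096];
	FILE *pipe;

	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable skb:kfree_skb");
		return 1;
	}
	close(fd);

	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}

	/* Lines mentioning TCP_MD5* are the drops added by this patch. */
	while (fgets(buf, sizeof(buf), pipe))
		if (strstr(buf, "TCP_MD5"))
			fputs(buf, stdout);
	return 0;
}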