Commit a6e49699 authored by David S. Miller

Merge branch 'rmnet-checksums-part-1'

Alex Elder says:

====================
net: qualcomm: rmnet: MAPv4 download checksum cleanup, part 1

I'm posting a large series in two smaller parts; this is part 1.

The RMNet driver handles MAP (or QMAP) protocol traffic.  There are
several versions of this protocol.  Version 1 supports multiplexing,
as well as aggregation of packets in a single buffer.  Version 4
adds the ability to perform checksum offload.  And version 5
implements checksum offload in a different way from version 4.

This series involves only MAPv4 protocol checksum offload, and only
in the download (RX) direction.  It affects handling of checksums
computed by hardware for UDP datagrams and TCP segments, carried
over both IPv4 and IPv6.

MAP packets arriving on an RMNet port implementing MAPv4 checksum
offload are passed to rmnet_map_checksum_downlink_packet() for
handling.

The packet is then passed to rmnet_map_ipv4_dl_csum_trailer() or
rmnet_map_ipv6_dl_csum_trailer(), depending on the contents of the MAP
payload.  These two functions interpret checksum metadata to
determine whether the checksum in the received packet matches that
calculated by the hardware.

It is these two functions that are the subject of this series (parts
1 and 2).  The bulk of each function is transformed--in a lot of
small steps--from an extremely difficult-to-follow block of checksum
processing code into a fairly simple, heavily commented equivalent.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9e4e1dd4 23a5708d
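
Before the diff itself, a small standalone sketch of the arithmetic the cover
letter describes may help: the hardware reports a ones'-complement checksum
over the whole IP packet in the MAPv4 trailer, and the driver turns that into
a verdict on the received TCP or UDP checksum by removing the IP header's
contribution and folding in the pseudo header.  This is an illustration only,
not code from the patch; the helper names echo the kernel's csum16_add() and
csum16_sub(), but the driver itself uses the kernel checksum helpers
(ip_fast_csum(), csum_tcpudp_magic(), csum_ipv6_magic()).

#include <stdint.h>

/* Fold a 32-bit accumulator into a 16-bit ones'-complement sum */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Ones'-complement add/subtract of two folded 16-bit sums */
static uint16_t csum16_add(uint16_t a, uint16_t b)
{
	return csum_fold((uint32_t)a + b);
}

static uint16_t csum16_sub(uint16_t a, uint16_t b)
{
	return csum16_add(a, (uint16_t)~b);
}

/* Decide whether a received transport (TCP/UDP) checksum is good.
 *
 * pkt_sum:    ones'-complement sum over the entire IP packet, as
 *             reported by the hardware in the MAPv4 trailer
 * iphdr_sum:  ones'-complement sum over just the IP header
 * pseudo_sum: ones'-complement sum over the TCP/UDP pseudo header
 *
 * The sender chose the transport checksum field so that the pseudo
 * header plus the transport segment sums to 0xffff.  Subtracting the
 * IP header's contribution from pkt_sum leaves the segment's sum, so
 * adding the pseudo-header sum must give 0xffff if the packet arrived
 * intact.
 */
int dl_transport_csum_ok(uint16_t pkt_sum, uint16_t iphdr_sum,
			 uint16_t pseudo_sum)
{
	uint16_t payload_sum = csum16_sub(pkt_sum, iphdr_sum);

	return csum16_add(payload_sum, pseudo_sum) == 0xffff;
}

The driver performs the same steps but keeps the values in the kernel's
complemented __sum16/__be16 checksum types, which is why the hunks below are
full of ~ operators and __force casts.
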
@@ -49,6 +49,7 @@ struct rmnet_pcpu_stats {
 struct rmnet_priv_stats {
 	u64 csum_ok;
+	u64 csum_ip4_header_bad;
 	u64 csum_valid_unset;
 	u64 csum_validation_failed;
 	u64 csum_err_bad_buffer;
@@ -19,23 +19,13 @@
 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
 					 const void *txporthdr)
 {
-	__sum16 *check = NULL;
+	if (protocol == IPPROTO_TCP)
+		return &((struct tcphdr *)txporthdr)->check;
-	switch (protocol) {
-	case IPPROTO_TCP:
-		check = &(((struct tcphdr *)txporthdr)->check);
-		break;
-	case IPPROTO_UDP:
-		check = &(((struct udphdr *)txporthdr)->check);
-		break;
+	if (protocol == IPPROTO_UDP)
+		return &((struct udphdr *)txporthdr)->check;
-	default:
-		check = NULL;
-		break;
-	}
-	return check;
+	return NULL;
 }
 static int
@@ -43,46 +33,60 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
 			       struct rmnet_map_dl_csum_trailer *csum_trailer,
 			       struct rmnet_priv *priv)
 {
-	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
-	u16 csum_value, csum_value_final;
-	struct iphdr *ip4h;
-	void *txporthdr;
+	struct iphdr *ip4h = (struct iphdr *)skb->data;
+	void *txporthdr = skb->data + ip4h->ihl * 4;
+	__sum16 *csum_field, csum_temp, pseudo_csum;
+	__sum16 ip_payload_csum;
+	u16 csum_value_final;
 	__be16 addend;
-	ip4h = (struct iphdr *)(skb->data);
-	if ((ntohs(ip4h->frag_off) & IP_MF) ||
-	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+	/* Computing the checksum over just the IPv4 header--including its
+	 * checksum field--should yield 0.  If it doesn't, the IP header
+	 * is bad, so return an error and let the IP layer drop it.
+	 */
+	if (ip_fast_csum(ip4h, ip4h->ihl)) {
+		priv->stats.csum_ip4_header_bad++;
+		return -EINVAL;
+	}
+	/* We don't support checksum offload on IPv4 fragments */
+	if (ip_is_fragment(ip4h)) {
 		priv->stats.csum_fragmented_pkt++;
 		return -EOPNOTSUPP;
 	}
-	txporthdr = skb->data + ip4h->ihl * 4;
+	/* Checksum offload is only supported for UDP and TCP protocols */
 	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
 	if (!csum_field) {
 		priv->stats.csum_err_invalid_transport++;
 		return -EPROTONOSUPPORT;
 	}
-	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
-	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+	/* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
+	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
 		priv->stats.csum_skipped++;
 		return 0;
 	}
-	csum_value = ~ntohs(csum_trailer->csum_value);
-	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
-	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
-				     (__force __be16)hdr_csum);
+	/* The checksum value in the trailer is computed over the entire
+	 * IP packet, including the IP header and payload.  To derive the
+	 * transport checksum from this, we first subtract the contribution
+	 * of the IP header from the trailer checksum.  We then add the
+	 * checksum computed over the pseudo header.
+	 *
+	 * We verified above that the IP header contributes zero to the
+	 * trailer checksum.  Therefore the checksum in the trailer is
+	 * just the checksum computed over the IP payload.
+	 */
+	ip_payload_csum = (__force __sum16)~csum_trailer->csum_value;
 	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
 					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
 					 ip4h->protocol, 0);
-	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	addend = (__force __be16)pseudo_csum;
 	pseudo_csum = csum16_add(ip_payload_csum, addend);
-	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	addend = (__force __be16)*csum_field;
 	csum_temp = ~csum16_sub(pseudo_csum, addend);
 	csum_value_final = (__force u16)csum_temp;
@@ -101,7 +105,7 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
 		}
 	}
-	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+	if (csum_value_final == (__force u16)*csum_field) {
 		priv->stats.csum_ok++;
 		return 0;
 	} else {
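
The new comment in the hunk above relies on a property of the IPv4 header
checksum worth spelling out: the ones'-complement sum of a well-formed header,
including its checksum field, is the all-ones value, so a header that passes
ip_fast_csum() contributes nothing to the trailer checksum, and the trailer
value is already the checksum over the IP payload.  A standalone sketch of
that header check (illustration only, not the kernel's ip_fast_csum(), which
returns the folded complement and so yields 0 for a good header):

#include <stdint.h>

/* Sum an IPv4 header, including its checksum field, as big-endian
 * 16-bit words.  ihl is the header length in 32-bit words, as in the
 * IPv4 header's IHL field.
 */
static uint16_t ipv4_header_sum(const uint8_t *hdr, unsigned int ihl)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 4; i += 2)
		sum += (uint32_t)hdr[i] << 8 | hdr[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* A good header sums to 0xffff: the stored checksum is the complement
 * of the sum of the other fields, and x + ~x == 0xffff in ones'-
 * complement arithmetic.  Adding 0xffff ("negative zero") to a larger
 * sum leaves it unchanged, which is the "contributes zero" argument
 * made in the comment above.
 */
int ipv4_header_ok(const uint8_t *hdr, unsigned int ihl)
{
	return ipv4_header_sum(hdr, ihl) == 0xffff;
}
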
@@ -116,39 +120,46 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
 			       struct rmnet_map_dl_csum_trailer *csum_trailer,
 			       struct rmnet_priv *priv)
 {
-	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
-	u16 csum_value, csum_value_final;
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
+	void *txporthdr = skb->data + sizeof(*ip6h);
+	__sum16 *csum_field, pseudo_csum, csum_temp;
 	__be16 ip6_hdr_csum, addend;
-	struct ipv6hdr *ip6h;
-	void *txporthdr;
+	__sum16 ip6_payload_csum;
+	__be16 ip_header_csum;
+	u16 csum_value_final;
+	__be16 csum_value;
 	u32 length;
-	ip6h = (struct ipv6hdr *)(skb->data);
-	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	/* Checksum offload is only supported for UDP and TCP protocols;
+	 * the packet cannot include any IPv6 extension headers
+	 */
 	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
 	if (!csum_field) {
 		priv->stats.csum_err_invalid_transport++;
 		return -EPROTONOSUPPORT;
 	}
-	csum_value = ~ntohs(csum_trailer->csum_value);
-	ip6_hdr_csum = (__force __be16)
-			~ntohs((__force __be16)ip_compute_csum(ip6h,
-			       (int)(txporthdr - (void *)(skb->data))));
+	/* The checksum value in the trailer is computed over the entire
+	 * IP packet, including the IP header and payload.  To derive the
+	 * transport checksum from this, we first subtract the contribution
+	 * of the IP header from the trailer checksum.  We then add the
+	 * checksum computed over the pseudo header.
+	 */
+	csum_value = ~csum_trailer->csum_value;
+	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
+	ip6_hdr_csum = (__force __be16)~ip_header_csum;
 	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
 				      ip6_hdr_csum);
 	length = (ip6h->nexthdr == IPPROTO_UDP) ?
 		 ntohs(((struct udphdr *)txporthdr)->len) :
 		 ntohs(ip6h->payload_len);
-	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
-			     length, ip6h->nexthdr, 0));
-	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+				       length, ip6h->nexthdr, 0);
+	addend = (__force __be16)pseudo_csum;
 	pseudo_csum = csum16_add(ip6_payload_csum, addend);
-	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	addend = (__force __be16)*csum_field;
 	csum_temp = ~csum16_sub(pseudo_csum, addend);
 	csum_value_final = (__force u16)csum_temp;
@@ -169,7 +180,7 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
 		}
 	}
-	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+	if (csum_value_final == (__force u16)*csum_field) {
 		priv->stats.csum_ok++;
 		return 0;
 	} else {
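
For reference, the pseudo header that csum_ipv6_magic() covers in the hunk
above is the one defined by RFC 2460: source address, destination address,
upper-layer packet length and next-header value.  A standalone sketch of that
pseudo-header sum (illustration only, not the kernel helper); the length is
taken from the UDP header for UDP and from the IPv6 payload length otherwise,
mirroring the choice made in the code above:

#include <stdint.h>

/* Ones'-complement sum of the RFC 2460 IPv6 pseudo header: the two
 * 128-bit addresses, the 32-bit upper-layer length, three zero bytes
 * and the next-header value.
 */
static uint16_t ipv6_pseudo_sum(const uint8_t saddr[16],
				const uint8_t daddr[16],
				uint32_t length, uint8_t nexthdr)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < 16; i += 2) {
		sum += (uint32_t)saddr[i] << 8 | saddr[i + 1];
		sum += (uint32_t)daddr[i] << 8 | daddr[i + 1];
	}
	sum += length >> 16;		/* high and low halves of the length */
	sum += length & 0xffff;
	sum += nexthdr;			/* 0x0000 word, then 0x00 | nexthdr */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}
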
@@ -194,15 +205,14 @@ static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
 }
 static void
-rmnet_map_ipv4_ul_csum_header(void *iphdr,
+rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
 			      struct rmnet_map_ul_csum_header *ul_header,
 			      struct sk_buff *skb)
 {
-	struct iphdr *ip4h = iphdr;
 	u16 val;
 	val = MAP_CSUM_UL_ENABLED_FLAG;
-	if (ip4h->protocol == IPPROTO_UDP)
+	if (iphdr->protocol == IPPROTO_UDP)
 		val |= MAP_CSUM_UL_UDP_FLAG;
 	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -230,15 +240,14 @@ static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
 }
 static void
-rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
 			      struct rmnet_map_ul_csum_header *ul_header,
 			      struct sk_buff *skb)
 {
-	struct ipv6hdr *ip6h = ip6hdr;
 	u16 val;
 	val = MAP_CSUM_UL_ENABLED_FLAG;
-	if (ip6h->nexthdr == IPPROTO_UDP)
+	if (ipv6hdr->nexthdr == IPPROTO_UDP)
 		val |= MAP_CSUM_UL_UDP_FLAG;
 	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
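
Both functions above build the same 16-bit uplink checksum-offload
descriptor: an "enabled" flag, a UDP-vs-TCP flag, and the offset
(skb->csum_offset) at which the device should insert the checksum it
computes.  A minimal sketch of that packing; the DEMO_* bit positions are
placeholders for illustration, and the authoritative layout is given by the
MAP_CSUM_UL_* definitions the driver uses:

#include <stdint.h>

/* Placeholder layout for illustration only; see the driver's
 * MAP_CSUM_UL_ENABLED_FLAG, MAP_CSUM_UL_UDP_FLAG and
 * MAP_CSUM_UL_OFFSET_MASK for the real definitions.
 */
#define DEMO_CSUM_UL_ENABLED_FLAG	(1u << 15)
#define DEMO_CSUM_UL_UDP_FLAG		(1u << 14)
#define DEMO_CSUM_UL_OFFSET_MASK	0x3fffu

static uint16_t demo_ul_csum_info(int is_udp, uint16_t csum_offset)
{
	uint16_t val = DEMO_CSUM_UL_ENABLED_FLAG;

	if (is_udp)
		val |= DEMO_CSUM_UL_UDP_FLAG;
	val |= csum_offset & DEMO_CSUM_UL_OFFSET_MASK;

	return val;
}
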
@@ -247,7 +256,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
 	skb->ip_summed = CHECKSUM_NONE;
-	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
 }
 #endif
@@ -430,21 +439,15 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
 		return -EINVAL;
 	}
-	if (skb->protocol == htons(ETH_P_IP)) {
+	if (skb->protocol == htons(ETH_P_IP))
 		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
-	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
+	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
 		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
-#else
-		priv->stats.csum_err_invalid_ip_version++;
-		return -EPROTONOSUPPORT;
-#endif
-	} else {
-		priv->stats.csum_err_invalid_ip_version++;
-		return -EPROTONOSUPPORT;
-	}
-	return 0;
+	priv->stats.csum_err_invalid_ip_version++;
+	return -EPROTONOSUPPORT;
 }
 static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
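
Both the downlink hunk above and the uplink hunk below replace
#if IS_ENABLED(CONFIG_IPV6) / #else blocks with an ordinary C condition.  A
minimal sketch of the pattern (the demo_* names are hypothetical stand-ins,
not driver functions): IS_ENABLED() expands to a constant 0 or 1, so when
IPv6 is disabled the branch is never taken and can be discarded as dead code,
while the code in it is still compiled and type-checked in every
configuration.

#include <linux/kconfig.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/errno.h>

/* Hypothetical handlers standing in for the rmnet trailer functions */
static int demo_handle_ipv4(struct sk_buff *skb) { return 0; }
static int demo_handle_ipv6(struct sk_buff *skb) { return 0; }

static int demo_dispatch(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return demo_handle_ipv4(skb);

	/* Constant-folded away when CONFIG_IPV6 is not enabled */
	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
		return demo_handle_ipv6(skb);

	return -EPROTONOSUPPORT;
}
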
@@ -461,28 +464,26 @@ static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
 		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
 		goto sw_csum;
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		iphdr = (char *)ul_header +
-			sizeof(struct rmnet_map_ul_csum_header);
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		goto sw_csum;
-		if (skb->protocol == htons(ETH_P_IP)) {
-			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
-			priv->stats.csum_hw++;
-			return;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
-			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
-			priv->stats.csum_hw++;
-			return;
-#else
-			priv->stats.csum_err_invalid_ip_version++;
-			goto sw_csum;
-#endif
-		} else {
-			priv->stats.csum_err_invalid_ip_version++;
-		}
+	iphdr = (char *)ul_header +
+		sizeof(struct rmnet_map_ul_csum_header);
+	if (skb->protocol == htons(ETH_P_IP)) {
+		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+		priv->stats.csum_hw++;
+		return;
+	}
+	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
+		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+		priv->stats.csum_hw++;
+		return;
+	}
+	priv->stats.csum_err_invalid_ip_version++;
 sw_csum:
 	memset(ul_header, 0, sizeof(*ul_header));
@@ -166,6 +166,7 @@ static const struct net_device_ops rmnet_vnd_ops = {
 static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
 	"Checksum ok",
+	"Bad IPv4 header checksum",
 	"Checksum valid bit not set",
 	"Checksum validation failed",
 	"Checksum error bad buffer",