Commit d4b2816d authored by Phoebe Buckheister, committed by David S. Miller

6lowpan: fix fragmentation

Currently, 6lowpan creates one 802.15.4 MAC header for the original
packet the device was given by upper layers and reuses this header for
all fragments, if fragmentation is required. This also reuses frame
sequence numbers, which must not happen. 6lowpan also has issues with
fragmentation in the presence of security headers, since those may imply
trailing fields (such as a message integrity code) that the fragmentation
code does not currently account for.
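
To see why the trailing fields matter, here is a rough worked example in plain C (the 21-byte MAC header is a hypothetical figure; the 4-byte MIC matches a *-MIC-32 security level). The old code budgeted fragments against IEEE802154_MTU minus the MAC header and the 2-byte FCS only, so a trailing MIC made every fragment overflow by its length:

	#include <stdio.h>

	#define IEEE802154_MTU      127	/* aMaxPHYPacketSize */
	#define IEEE802154_MFR_SIZE   2	/* MAC footer: frame check sequence */

	int main(void)
	{
		int mac_hdr_len = 21;	/* hypothetical MAC header, incl. aux security header */
		int mic_len = 4;	/* trailing MIC implied by e.g. a *-MIC-32 level */

		/* Budget the old fragmentation code assumed per frame. */
		int assumed = IEEE802154_MTU - mac_hdr_len - IEEE802154_MFR_SIZE;
		/* Room actually left once the MIC is accounted for. */
		int actual = assumed - mic_len;

		printf("assumed %d bytes, actual %d: overflow of %d bytes per fragment\n",
		       assumed, actual, assumed - actual);
		return 0;
	}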

Fix both of these issues by properly allocating fragment skbs with
headroom and tailroom as specified by the underlying device, creating one
header for each fragment instead of reusing the original header, and
letting the underlying device do the rest (see the allocation sketch below).
Signed-off-by: Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 32edc40a
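
Before the diff, a minimal, hypothetical sketch of the allocation pattern the fix adopts. This helper is not in the patch; it only illustrates reserving the real device's hard_header_len as headroom, leaving its needed_tailroom free for trailing fields such as a security MIC, and building a fresh link-layer header per fragment via dev_hard_header() (which is also what gives each fragment its own sequence number):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper, not part of the patch: headroom covers the
	 * device's link-layer header, tailroom stays free for trailing
	 * fields (e.g. a security MIC), and dev_hard_header() builds a
	 * fresh MAC header -- and thus a fresh sequence number -- for
	 * every fragment.
	 */
	static struct sk_buff *frag_skb_alloc(struct net_device *real_dev,
					      const void *daddr, const void *saddr,
					      unsigned int size)
	{
		struct sk_buff *skb;

		skb = alloc_skb(real_dev->hard_header_len + size +
				real_dev->needed_tailroom, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb->dev = real_dev;
		skb_reserve(skb, real_dev->hard_header_len);	/* room for the MAC header */
		skb_reset_network_header(skb);

		if (dev_hard_header(skb, real_dev, 0, daddr, saddr, size) < 0) {
			kfree_skb(skb);
			return NULL;
		}

		return skb;
	}

The patch's lowpan_alloc_frag() below follows the same shape, but returns ERR_PTR() codes and copies the parent skb's mac_cb().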
...
@@ -220,139 +220,149 @@ static int lowpan_set_address(struct net_device *dev, void *p)
 	return 0;
 }
 
-static int
-lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
-		     int mlen, int plen, int offset, int type)
+static struct sk_buff*
+lowpan_alloc_frag(struct sk_buff *skb, int size,
+		  const struct ieee802154_hdr *master_hdr)
 {
+	struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
 	struct sk_buff *frag;
-	int hlen;
-
-	hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
-		LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
-
-	raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
-
-	frag = netdev_alloc_skb(skb->dev,
-				hlen + mlen + plen + IEEE802154_MFR_SIZE);
-	if (!frag)
-		return -ENOMEM;
-
-	frag->priority = skb->priority;
-
-	/* copy header, MFR and payload */
-	skb_put(frag, mlen);
-	skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
-
-	skb_put(frag, hlen);
-	skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
-
-	skb_put(frag, plen);
-	skb_copy_to_linear_data_offset(frag, mlen + hlen,
-				       skb_network_header(skb) + offset, plen);
-
-	raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
+	int rc;
+
+	frag = alloc_skb(real_dev->hard_header_len +
+			 real_dev->needed_tailroom + size,
+			 GFP_ATOMIC);
+
+	if (likely(frag)) {
+		frag->dev = real_dev;
+		frag->priority = skb->priority;
+		skb_reserve(frag, real_dev->hard_header_len);
+		skb_reset_network_header(frag);
+		*mac_cb(frag) = *mac_cb(skb);
+
+		rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
+				     &master_hdr->source, size);
+		if (rc < 0) {
+			kfree_skb(frag);
+			return ERR_PTR(-rc);
+		}
+	} else {
+		frag = ERR_PTR(ENOMEM);
+	}
+
+	return frag;
+}
+
+static int
+lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
+		     u8 *frag_hdr, int frag_hdrlen,
+		     int offset, int len)
+{
+	struct sk_buff *frag;
+
+	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
+
+	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+	if (IS_ERR(frag))
+		return -PTR_ERR(frag);
+
+	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
+	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
+
+	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
 
 	return dev_queue_xmit(frag);
 }
 
 static int
-lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
+		       const struct ieee802154_hdr *wpan_hdr)
 {
-	int err;
-	u16 dgram_offset, dgram_size, payload_length, header_length,
-	    lowpan_size, frag_plen, offset;
-	__be16 tag;
-	u8 head[5];
-
-	header_length = skb->mac_len;
-	payload_length = skb->len - header_length;
-	tag = lowpan_dev_info(dev)->fragment_tag++;
-	lowpan_size = skb_network_header_len(skb);
+	u16 dgram_size, dgram_offset;
+	__be16 frag_tag;
+	u8 frag_hdr[5];
+	int frag_cap, frag_len, payload_cap, rc;
+	int skb_unprocessed, skb_offset;
+
 	dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
-		     header_length;
+		     skb->mac_len;
+	frag_tag = lowpan_dev_info(dev)->fragment_tag++;
 
-	/* first fragment header */
-	head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
-	head[1] = dgram_size & 0xff;
-	memcpy(head + 2, &tag, sizeof(tag));
-
-	/* calc the nearest payload length(divided to 8) for first fragment
-	 * which fits into a IEEE802154_MTU
-	 */
-	frag_plen = round_down(IEEE802154_MTU - header_length -
-			       LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
-			       IEEE802154_MFR_SIZE, 8);
-
-	err = lowpan_fragment_xmit(skb, head, header_length,
-				   frag_plen + lowpan_size, 0,
-				   LOWPAN_DISPATCH_FRAG1);
-	if (err) {
+	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
+	frag_hdr[1] = dgram_size & 0xff;
+	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
+
+	payload_cap = ieee802154_max_payload(wpan_hdr);
+
+	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
+			      skb_network_header_len(skb), 8);
+
+	skb_offset = skb_network_header_len(skb);
+	skb_unprocessed = skb->len - skb->mac_len - skb_offset;
+
+	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+				  LOWPAN_FRAG1_HEAD_SIZE, 0,
+				  frag_len + skb_network_header_len(skb));
+	if (rc) {
 		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
-			 __func__, tag);
-		goto exit;
+			 __func__, frag_tag);
+		goto err;
 	}
 
-	offset = lowpan_size + frag_plen;
-	dgram_offset += frag_plen;
-
-	/* next fragment header */
-	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
-	head[0] |= LOWPAN_DISPATCH_FRAGN;
-
-	frag_plen = round_down(IEEE802154_MTU - header_length -
-			       LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);
-
-	while (payload_length - offset > 0) {
-		int len = frag_plen;
+	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
+	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
+	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
 
-		head[4] = dgram_offset >> 3;
+	while (skb_unprocessed >= frag_cap) {
+		dgram_offset += frag_len;
+		skb_offset += frag_len;
+		skb_unprocessed -= frag_len;
+		frag_len = min(frag_cap, skb_unprocessed);
 
-		if (payload_length - offset < len)
-			len = payload_length - offset;
+		frag_hdr[4] = dgram_offset >> 3;
 
-		err = lowpan_fragment_xmit(skb, head, header_length, len,
-					   offset, LOWPAN_DISPATCH_FRAGN);
-		if (err) {
+		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
+					  frag_len);
+		if (rc) {
 			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
-				 __func__, tag, offset);
-			goto exit;
+				 __func__, frag_tag, skb_offset);
+			goto err;
 		}
-
-		offset += len;
-		dgram_offset += len;
 	}
 
-exit:
-	return err;
+	consume_skb(skb);
+	return NET_XMIT_SUCCESS;
+
+err:
+	kfree_skb(skb);
+	return rc;
 }
 
 static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	int err = -1;
+	struct ieee802154_hdr wpan_hdr;
+	int max_single;
 
 	pr_debug("package xmit\n");
 
-	skb->dev = lowpan_dev_info(dev)->real_dev;
-	if (skb->dev == NULL) {
-		pr_debug("ERROR: no real wpan device found\n");
-		goto error;
+	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
 	}
 
-	/* Send directly if less than the MTU minus the 2 checksum bytes. */
-	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
-		err = dev_queue_xmit(skb);
-		goto out;
-	}
+	max_single = ieee802154_max_payload(&wpan_hdr);
 
-	pr_debug("frame is too big, fragmentation is needed\n");
-	err = lowpan_skb_fragmentation(skb, dev);
-error:
-	dev_kfree_skb(skb);
-out:
-	if (err)
-		pr_debug("ERROR: xmit failed\n");
+	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		skb->dev = lowpan_dev_info(dev)->real_dev;
+		return dev_queue_xmit(skb);
+	} else {
+		netdev_tx_t rc;
+
+		pr_debug("frame is too big, fragmentation is needed\n");
+		rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
 
-	return (err < 0) ? NET_XMIT_DROP : err;
+		return rc < 0 ? NET_XMIT_DROP : rc;
+	}
 }
 
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
...
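
For reference, the frag_hdr[5] array built above follows the RFC 4944 fragment header layout: FRAG1 is 4 bytes (5-bit dispatch, 11-bit datagram_size, 16-bit datagram_tag) and FRAGN is 5 bytes (the same fields plus datagram_offset in units of 8 octets). A standalone userspace sketch of the same packing, mirroring the kernel's dispatch constants:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* RFC 4944 fragmentation dispatch values, matching the kernel's 6lowpan.h. */
	#define LOWPAN_DISPATCH_FRAG1	0xc0	/* 11000xxx */
	#define LOWPAN_DISPATCH_FRAGN	0xe0	/* 11100xxx */

	/* FRAG1: 5-bit dispatch + 11-bit datagram_size, then a 16-bit tag.
	 * (The kernel keeps the tag as __be16; byte order is elided here.)
	 */
	static void build_frag1(uint8_t hdr[4], uint16_t dgram_size, uint16_t tag)
	{
		hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
		hdr[1] = dgram_size & 0xff;
		memcpy(hdr + 2, &tag, sizeof(tag));
	}

	/* FRAGN: same fields, new dispatch, plus datagram_offset in 8-octet
	 * units -- the same packing as frag_hdr[4] = dgram_offset >> 3 above.
	 */
	static void build_fragn(uint8_t hdr[5], uint16_t dgram_size, uint16_t tag,
				uint16_t dgram_offset)
	{
		build_frag1(hdr, dgram_size, tag);
		hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
		hdr[0] |= LOWPAN_DISPATCH_FRAGN;
		hdr[4] = dgram_offset >> 3;
	}

	int main(void)
	{
		uint8_t hdr[5];

		build_fragn(hdr, 1280, 42, 96);	/* 1280-byte datagram, offset 96 */
		printf("%02x %02x %02x %02x %02x\n",
		       hdr[0], hdr[1], hdr[2], hdr[3], hdr[4]);
		return 0;
	}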