Commit f22828e8 authored by Michael Chan, committed by David S. Miller

bnx2: Reinsert VLAN tag when necessary.

In certain cases when ASF or other management firmware is running, the
chip may be configured to always strip out the VLAN tag even when
VLAN acceleration is not enabled.  As a result, some VLAN-tagged
packets reach the host stack with no indication that the original
packet carried a VLAN tag.

We fix this by re-inserting the VLAN tag into the packet when necessary.

Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 729b85cd
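
For illustration only, here is a minimal, self-contained user-space sketch of
the re-insertion idea used by the patch below: when the hardware has stripped
the 802.1Q tag, grow the frame by 4 bytes at the front, move the two MAC
addresses forward, and write the TPID and the saved TCI into the gap.  The
helper name reinsert_vlan_tag() and the locally defined ETH_ALEN/ETH_P_8021Q
constants are assumptions made for this sketch, not part of the driver.

/*
 * Illustrative sketch only -- not the bnx2 driver code.  Rebuild an
 * 802.1Q header in front of a frame whose VLAN tag was stripped by
 * hardware.  The caller must guarantee 4 bytes of valid headroom
 * before "frame".
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* htons() */

#define ETH_ALEN     6          /* bytes in a MAC address */
#define ETH_P_8021Q  0x8100     /* 802.1Q VLAN TPID */

/* "frame" points at the destination MAC of an untagged frame of *len
 * bytes.  Returns the new start of the tagged frame and grows *len by 4. */
static uint8_t *reinsert_vlan_tag(uint8_t *frame, size_t *len, uint16_t tci)
{
        uint8_t *tagged = frame - 4;            /* grow the frame at the front */
        uint16_t proto  = htons(ETH_P_8021Q);
        uint16_t tag    = htons(tci);

        /* Move the destination and source MAC addresses up by 4 bytes... */
        memmove(tagged, frame, ETH_ALEN * 2);

        /* ...and fill the 4-byte gap with the TPID and the saved TCI. */
        memcpy(tagged + ETH_ALEN * 2, &proto, sizeof(proto));
        memcpy(tagged + ETH_ALEN * 2 + sizeof(proto), &tag, sizeof(tag));

        *len += 4;
        return tagged;
}

In the patch itself this is done with __skb_push(), memmove() and struct
vlan_ethhdr when no VLAN group is registered; the copy path's switch from 2
to 6 reserved bytes provides the 4 bytes of headroom that push needs.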
@@ -2876,6 +2876,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		struct sw_bd *rx_buf;
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
+		u16 vtag = 0;
+		int hw_vlan __maybe_unused = 0;
 
 		sw_ring_cons = RX_RING_IDX(sw_cons);
 		sw_ring_prod = RX_RING_IDX(sw_prod);
@@ -2919,7 +2921,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		if (len <= bp->rx_copy_thresh) {
 			struct sk_buff *new_skb;
 
-			new_skb = netdev_alloc_skb(bp->dev, len + 2);
+			new_skb = netdev_alloc_skb(bp->dev, len + 6);
 			if (new_skb == NULL) {
 				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
 						  sw_ring_prod);
@@ -2928,9 +2930,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 			/* aligned copy */
 			skb_copy_from_linear_data_offset(skb,
-							 BNX2_RX_OFFSET - 2,
-							 new_skb->data, len + 2);
-			skb_reserve(new_skb, 2);
+							 BNX2_RX_OFFSET - 6,
+							 new_skb->data, len + 6);
+			skb_reserve(new_skb, 6);
 			skb_put(new_skb, len);
 
 			bnx2_reuse_rx_skb(bp, rxr, skb,
@@ -2941,6 +2943,25 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
 
+		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
+			vtag = rx_hdr->l2_fhdr_vlan_tag;
+#ifdef BCM_VLAN
+			if (bp->vlgrp)
+				hw_vlan = 1;
+			else
+#endif
+			{
+				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
+					__skb_push(skb, 4);
+
+				memmove(ve, skb->data + 4, ETH_ALEN * 2);
+				ve->h_vlan_proto = htons(ETH_P_8021Q);
+				ve->h_vlan_TCI = htons(vtag);
+				len += 4;
+			}
+		}
+
 		skb->protocol = eth_type_trans(skb, bp->dev);
 
 		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
@@ -2962,10 +2983,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		}
 
 #ifdef BCM_VLAN
-		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
-			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-				rx_hdr->l2_fhdr_vlan_tag);
-		}
+		if (hw_vlan)
+			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
 		else
 #endif
 			netif_receive_skb(skb);