Commit 0a325758 authored by David S. Miller

Merge branch 'qlge'

Jitendra Kalsaria says:

====================
This patch series enhances the handling of nested VLAN tags in the Rx path.

V2 changes:
* Removed the module parameter.

V3 changes:
* Users can enable or disable hardware VLAN acceleration using ethtool
  (see the usage example below).
====================
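With hardware VLAN acceleration exposed through the standard netdev feature
flags (see the qlge_probe() hunk at the end of this diff), the Rx offload can
be toggled at runtime with ethtool; the interface name eth0 below is only a
placeholder:

    ethtool -K eth0 rxvlan off   # disable hardware VLAN tag stripping
    ethtool -K eth0 rxvlan on    # re-enable it

Because qlge_fix_features() forces the tx flag into the same state as rx,
txvlan follows the rxvlan setting automatically.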
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fac87a8e 146669a5
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME "qlge"
 #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "1.00.00.33"
 #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
 
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
         }
 }
 
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                  void *page, size_t *len)
+{
+        u16 *tags;
+
+        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                return;
+        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+                tags = (u16 *)page;
+                /* Look for stacked vlan tags in ethertype field */
+                if (tags[6] == ETH_P_8021Q &&
+                    tags[8] == ETH_P_8021Q)
+                        *len += 2 * VLAN_HLEN;
+                else
+                        *len += VLAN_HLEN;
+        }
+}
+
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                                        struct rx_ring *rx_ring,
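The helper added above keys off the ethertype positions in the MAC header:
viewed as an array of u16s, index 6 is the outer ethertype (byte offset 12,
right after the two 6-byte MAC addresses) and index 8 is the inner ethertype
(byte offset 16) once a 4-byte VLAN tag is present. Below is a minimal
userspace sketch of that offset arithmetic, not driver code: the frame bytes
are made up, and the sketch converts the on-wire big-endian fields with
ntohs() before comparing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() */

#define ETH_P_8021Q 0x8100      /* 802.1Q VLAN TPID */
#define VLAN_HLEN   4           /* bytes added per VLAN tag */
#define ETH_HLEN    14          /* untagged Ethernet header */

int main(void)
{
        /* Illustrative double-tagged frame: dst MAC, src MAC, then
         * two TPID/TCI pairs, then the encapsulated ethertype.
         */
        const uint8_t frame[] = {
                0x00, 0x11, 0x22, 0x33, 0x44, 0x55,     /* dst MAC */
                0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,     /* src MAC */
                0x81, 0x00, 0x00, 0x64,                 /* outer tag */
                0x81, 0x00, 0x00, 0xc8,                 /* inner tag */
                0x08, 0x00,                             /* IPv4 */
        };
        uint16_t outer, inner;
        size_t hlen = ETH_HLEN;

        /* Same positions as tags[6] and tags[8] in the driver:
         * u16 index 6 == byte offset 12, u16 index 8 == byte offset 16.
         */
        memcpy(&outer, frame + 12, sizeof(outer));
        memcpy(&inner, frame + 16, sizeof(inner));

        if (ntohs(outer) == ETH_P_8021Q && ntohs(inner) == ETH_P_8021Q)
                hlen += 2 * VLAN_HLEN;          /* stacked tags */
        else if (ntohs(outer) == ETH_P_8021Q)
                hlen += VLAN_HLEN;              /* single tag */

        printf("MAC header length: %zu\n", hlen);       /* prints 22 */
        return 0;
}

For a double-tagged frame this yields 22 bytes (14 + 2 * 4), which is exactly
the length that __pskb_pull_tail() is asked to pull into the skb's linear
area in the ql_build_rx_skb() hunks below.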
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
         void *addr;
         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
         struct napi_struct *napi = &rx_ring->napi;
+        size_t hlen = ETH_HLEN;
 
         skb = netdev_alloc_skb(ndev, length);
         if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                 goto err_out;
         }
 
+        /* Update the MAC header length */
+        ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
         /* The max framesize filter on this chip is set higher than
          * MTU since FCoE uses 2k frames.
          */
-        if (skb->len > ndev->mtu + ETH_HLEN) {
+        if (skb->len > ndev->mtu + hlen) {
                 netif_err(qdev, drv, qdev->ndev,
                           "Segment too small, dropping.\n");
                 rx_ring->rx_dropped++;
                 goto err_out;
         }
-        memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+        memcpy(skb_put(skb, hlen), addr, hlen);
         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
                      length);
         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-                           lbq_desc->p.pg_chunk.offset+ETH_HLEN,
-                           length-ETH_HLEN);
-        skb->len += length-ETH_HLEN;
-        skb->data_len += length-ETH_HLEN;
-        skb->truesize += length-ETH_HLEN;
+                           lbq_desc->p.pg_chunk.offset + hlen,
+                           length - hlen);
+        skb->len += length - hlen;
+        skb->data_len += length - hlen;
+        skb->truesize += length - hlen;
 
         rx_ring->rx_packets++;
         rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
             (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
                 /* Unfragmented ipv4 UDP frame. */
                 struct iphdr *iph =
-                        (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+                        (struct iphdr *)((u8 *)addr + hlen);
                 if (!(iph->frag_off &
                         htons(IP_MF|IP_OFFSET))) {
                         skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1727,6 +1756,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
         struct sk_buff *skb = NULL;
         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+        size_t hlen = ETH_HLEN;
 
         /*
          * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                         skb->data_len += length;
                         skb->truesize += length;
                         length -= length;
-                        __pskb_pull_tail(skb,
-                                (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                                VLAN_ETH_HLEN : ETH_HLEN);
+                        ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+                                              lbq_desc->p.pg_chunk.va,
+                                              &hlen);
+                        __pskb_pull_tail(skb, hlen);
                 }
         } else {
                 /*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                         length -= size;
                         i++;
                 }
-                __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                                VLAN_ETH_HLEN : ETH_HLEN);
+                ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+                                      &hlen);
+                __pskb_pull_tail(skb, hlen);
         }
         return skb;
 }
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
         rx_ring->rx_packets++;
         rx_ring->rx_bytes += skb->len;
         skb_record_rx_queue(skb, rx_ring->cq_id);
-        if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+        if (vlan_id != 0xffff)
                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                 napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-        u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+        u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
         }
 }
 
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+                                        netdev_features_t features)
+{
+        struct ql_adapter *qdev = netdev_priv(ndev);
+        int status = 0;
+
+        status = ql_adapter_down(qdev);
+        if (status) {
+                netif_err(qdev, link, qdev->ndev,
+                          "Failed to bring down the adapter\n");
+                return status;
+        }
+
+        /* update the features with recent change */
+        ndev->features = features;
+
+        status = ql_adapter_up(qdev);
+        if (status) {
+                netif_err(qdev, link, qdev->ndev,
+                          "Failed to bring up the adapter\n");
+                return status;
+        }
+        return status;
+}
+
 static netdev_features_t qlge_fix_features(struct net_device *ndev,
         netdev_features_t features)
 {
+        int err;
+
         /*
          * Since there is no support for separate rx/tx vlan accel
          * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
         else
                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
+        /* Update the behavior of vlan accel in the adapter */
+        err = qlge_update_hw_vlan_features(ndev, features);
+        if (err)
+                return err;
+
         return features;
 }
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
         ql_write32(qdev, SYS, mask | value);
 
         /* Set the default queue, and VLAN behavior. */
-        value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-        mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+        value = NIC_RCV_CFG_DFQ;
+        mask = NIC_RCV_CFG_DFQ_MASK;
+        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+                value |= NIC_RCV_CFG_RV;
+                mask |= (NIC_RCV_CFG_RV << 16);
+        }
         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
         /* Set the MPI interrupt to enabled. */
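The NIC_RCV_CFG write above follows the masked-write convention visible
throughout this hunk (compare the SYS write): the upper 16 bits of the 32-bit
word select which of the lower 16 bits the hardware latches, which is why
enabling NIC_RCV_CFG_RV requires OR-ing in both the bit and (bit << 16). A
small standalone C sketch of that convention follows; the register emulation
and the bit value are illustrative assumptions, not hardware documentation.

#include <stdint.h>
#include <stdio.h>

#define NIC_RCV_CFG_RV 0x8      /* hypothetical bit value, for illustration */

/* Emulate a register where the high half of the written word is a
 * bit mask selecting which low-half bits get latched.
 */
static uint16_t masked_write(uint16_t cur, uint32_t word)
{
        uint16_t mask = word >> 16;
        uint16_t value = word & 0xffff;

        return (cur & ~mask) | (value & mask);
}

int main(void)
{
        uint16_t reg = 0;

        /* Enable VLAN tag stripping: set the bit and its mask bit. */
        reg = masked_write(reg, (NIC_RCV_CFG_RV << 16) | NIC_RCV_CFG_RV);
        printf("after enable:  0x%04x\n", (unsigned)reg);   /* 0x0008 */

        /* Disable: mask bit set, value bit clear; other bits untouched. */
        reg = masked_write(reg, NIC_RCV_CFG_RV << 16);
        printf("after disable: 0x%04x\n", (unsigned)reg);   /* 0x0000 */
        return 0;
}

This is what lets the conditional above leave VLAN stripping disabled simply
by not setting the mask bit when NETIF_F_HW_VLAN_CTAG_RX is off.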
@@ -4692,11 +4764,15 @@ static int qlge_probe(struct pci_dev *pdev,
         qdev = netdev_priv(ndev);
         SET_NETDEV_DEV(ndev, &pdev->dev);
-        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-                NETIF_F_TSO | NETIF_F_TSO_ECN |
-                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
-        ndev->features = ndev->hw_features |
-                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+        ndev->hw_features = NETIF_F_SG |
+                            NETIF_F_IP_CSUM |
+                            NETIF_F_TSO |
+                            NETIF_F_TSO_ECN |
+                            NETIF_F_HW_VLAN_CTAG_TX |
+                            NETIF_F_HW_VLAN_CTAG_RX |
+                            NETIF_F_HW_VLAN_CTAG_FILTER |
+                            NETIF_F_RXCSUM;
+        ndev->features = ndev->hw_features;
         ndev->vlan_features = ndev->hw_features;
 
         if (test_bit(QL_DMA64, &qdev->flags))