Commit 421355de authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

parents aace4959 0fe7463a
...@@ -3654,6 +3654,7 @@ NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
W: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
S: Maintained
F: net/
...@@ -5635,6 +5636,13 @@ S: Maintained
F: drivers/vlynq/vlynq.c
F: include/linux/vlynq.h
VMWARE VMXNET3 ETHERNET DRIVER
M: Shreyas Bhatewara <sbhatewara@vmware.com>
M: VMware, Inc. <pv-drivers@vmware.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/vmxnet3/
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood <lrg@slimlogic.co.uk>
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
......
...@@ -3230,4 +3230,12 @@ config VIRTIO_NET
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && X86
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
endif # NETDEVICES
...@@ -30,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o
obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_BE2NET) += benet/
obj-$(CONFIG_VMXNET3) += vmxnet3/
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
......
...@@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev)
memset(ap->info, 0, sizeof(struct ace_info));
memset(ap->skb, 0, sizeof(struct ace_skb));
ecode = ace_load_firmware(dev);
if (ecode)
goto init_error;
ap->fw_running = 0;
......
...@@ -330,6 +330,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
/* EMAC Stats Clear Mask */
#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
/** net_buf_obj: EMAC network bufferdata structure
*
* EMAC network buffer data structure
...@@ -2544,40 +2547,49 @@ static int emac_dev_stop(struct net_device *ndev)
static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
u32 mac_control;
u32 stats_clear_mask;
/* update emac hardware stats and reset the registers*/
mac_control = emac_read(EMAC_MACCONTROL);
if (mac_control & EMAC_MACCONTROL_GMIIEN)
stats_clear_mask = EMAC_STATS_CLR_MASK;
else
stats_clear_mask = 0;
priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
emac_read(EMAC_TXSINGLECOLL) +
emac_read(EMAC_TXMULTICOLL));
emac_write(EMAC_TXCOLLISION, stats_clear_mask);
emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
emac_read(EMAC_RXJABBER) +
emac_read(EMAC_RXUNDERSIZED));
emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
emac_write(EMAC_RXJABBER, stats_clear_mask);
emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
emac_read(EMAC_RXMOFOVERRUNS));
emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
priv->net_dev_stats.tx_carrier_errors +=
emac_read(EMAC_TXCARRIERSENSE);
emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
return &priv->net_dev_stats;
}
......
...@@ -663,7 +663,8 @@ static int ethoc_open(struct net_device *dev)
return ret;
/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min_t(unsigned int,
128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
priv->num_tx = max(min_tx, num_bd / 4);
priv->num_rx = num_bd - priv->num_tx;
ethoc_write(priv, TX_BD_NUM, priv->num_tx);
......
...@@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si)
/*
* Ensure that the ports for this device are setup correctly.
*/
if (si->pdata->startup) {
ret = si->pdata->startup(si->dev);
if (ret)
return ret;
}
/*
* Configure PPC for IRDA - we want to drive TXD2 low.
......
...@@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters =
}
};
struct enp2611_ixpdev_priv
{
struct ixpdev_priv ixpdev_priv;
struct net_device_stats stats;
};
static struct net_device *nds[3];
static struct timer_list link_check_timer;
static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
{
struct enp2611_ixpdev_priv *ip = netdev_priv(dev);
pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));
return &(ip->stats);
}
/* @@@ Poll the SFP moddef0 line too. */
/* @@@ Try to use the pm3386 DOOL interrupt as well. */
static void enp2611_check_link_status(unsigned long __dummy)
...@@ -203,14 +188,13 @@ static int __init enp2611_init_module(void)
ports = pm3386_port_count();
for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
free_netdev(nds[i]);
return -ENOMEM;
}
nds[i]->get_stats = enp2611_get_stats;
pm3386_init_port(i);
pm3386_get_mac(i, nds[i]->dev_addr);
}
......
...@@ -21,6 +21,7 @@
#include "ixp2400_tx.ucode"
#include "ixpdev_priv.h"
#include "ixpdev.h"
#include "pm3386.h"
#define DRV_MODULE_VERSION "0.2"
...@@ -270,6 +271,15 @@ static int ixpdev_close(struct net_device *dev)
return 0;
}
static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
{
struct ixpdev_priv *ip = netdev_priv(dev);
pm3386_get_stats(ip->channel, &(dev->stats));
return &(dev->stats);
}
static const struct net_device_ops ixpdev_netdev_ops = {
.ndo_open = ixpdev_open,
.ndo_stop = ixpdev_close,
...@@ -277,6 +287,7 @@ static const struct net_device_ops ixpdev_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats = ixpdev_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixpdev_poll_controller,
#endif
......
...@@ -597,7 +597,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
void __iomem *mem_ptr2 = NULL;
void __iomem *db_ptr = NULL;
resource_size_t mem_base, db_base;
unsigned long mem_len, db_len = 0, pci_len0 = 0;
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
......
...@@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void tc574_detach(struct pcmcia_device *p_dev);
...@@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_tx_timeout = el3_tx_timeout,
.ndo_get_stats = el3_get_stats,
.ndo_do_ioctl = el3_ioctl,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
...@@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev)
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
}
static void set_multicast_list(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&lp->window_lock, flags);
set_rx_mode(dev);
spin_unlock_irqrestore(&lp->window_lock, flags);
}
static int el3_close(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
......
...@@ -115,7 +115,9 @@ enum mac_version {
RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP
};
#define _R(NAME,MAC,MASK) \
...@@ -150,7 +152,9 @@ static const struct {
_R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
_R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
_R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
_R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
_R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
_R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E
};
#undef _R
...@@ -253,6 +257,13 @@ enum rtl8168_8101_registers {
DBG_REG = 0xd1,
#define FIX_NAK_1 (1 << 4)
#define FIX_NAK_2 (1 << 3)
EFUSEAR = 0xdc,
#define EFUSEAR_FLAG 0x80000000
#define EFUSEAR_WRITE_CMD 0x80000000
#define EFUSEAR_READ_CMD 0x00000000
#define EFUSEAR_REG_MASK 0x03ff
#define EFUSEAR_REG_SHIFT 8
#define EFUSEAR_DATA_MASK 0xff
};
enum rtl_register_content {
...@@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
}
static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m)
{
int val;
val = mdio_read(ioaddr, reg_addr);
mdio_write(ioaddr, reg_addr, (val | p) & ~m);
}
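/*
 * Reader's note, not part of the patch: mdio_plus_minus() is a
 * read-modify-write helper that first ORs in the "p" bits and then clears
 * the "m" bits.  A minimal stand-alone sketch of the same ordering, with a
 * hypothetical name, so the bit arithmetic is easy to check:
 */
static inline int rmw_plus_minus_example(int val, int p, int m)
{
	return (val | p) & ~m;	/* set the plus bits, then clear the minus bits */
}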
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
int val)
{
...@@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
return value;
}
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
u8 value = 0xff;
unsigned int i;
RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
for (i = 0; i < 300; i++) {
if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
break;
}
udelay(100);
}
return value;
}
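/*
 * Reader's note, not part of the patch: the efuse address written above is
 * (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT, i.e. a 10-bit index
 * placed at bits 8-17 (EFUSEAR_READ_CMD is 0, so no command bit is set);
 * the loop then polls EFUSEAR_FLAG for up to 300 * 100us = 30ms before
 * falling back to the 0xff default.
 */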
static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
{
RTL_W16(IntrMask, 0x0000);
...@@ -1243,7 +1280,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
int mac_version;
} mac_info[] = {
/* 8168D family. */
{ 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
{ 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
{ 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 },
{ 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
/* 8168C family. */
{ 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 },
...@@ -1648,74 +1688,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
rtl8168c_3_hw_phy_config(ioaddr);
}
static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
{
static struct phy_reg phy_reg_init_0[] = {
{ 0x1f, 0x0001 },
{ 0x06, 0x4064 },
{ 0x07, 0x2863 },
{ 0x08, 0x059c },
{ 0x09, 0x26b4 },
{ 0x0a, 0x6a19 },
{ 0x0b, 0xdcc8 },
{ 0x10, 0xf06d },
{ 0x14, 0x7f68 },
{ 0x18, 0x7fd9 },
{ 0x1c, 0xf0ff },
{ 0x1d, 0x3d9c },
{ 0x1f, 0x0003 },
{ 0x12, 0xf49f },
{ 0x13, 0x070b },
{ 0x1a, 0x05ad },
{ 0x14, 0x94c0 }
};
static struct phy_reg phy_reg_init_1[] = {
{ 0x1f, 0x0002 },
{ 0x06, 0x5461 },
{ 0x1f, 0x0002 },
{ 0x06, 0x5561 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8332 },
{ 0x06, 0x5561 }
};
static struct phy_reg phy_reg_init_2[] = {
{ 0x1f, 0x0005 },
{ 0x05, 0xffc2 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8000 },
{ 0x06, 0xf8f9 },
{ 0x06, 0xfaef },
{ 0x06, 0x59ee },
{ 0x06, 0xf8ea },
{ 0x06, 0x00ee },
{ 0x06, 0xf8eb },
{ 0x06, 0x00e0 },
{ 0x06, 0xf87c },
{ 0x06, 0xe1f8 },
{ 0x06, 0x7d59 },
{ 0x06, 0x0fef },
{ 0x06, 0x0139 },
{ 0x06, 0x029e },
{ 0x06, 0x06ef },
{ 0x06, 0x1039 },
{ 0x06, 0x089f },
{ 0x06, 0x2aee },
{ 0x06, 0xf8ea },
{ 0x06, 0x00ee },
{ 0x06, 0xf8eb },
{ 0x06, 0x01e0 },
{ 0x06, 0xf87c },
{ 0x06, 0xe1f8 },
{ 0x06, 0x7d58 },
{ 0x06, 0x409e },
{ 0x06, 0x0f39 },
{ 0x06, 0x46aa },
{ 0x06, 0x0bbf },
{ 0x06, 0x8290 },
{ 0x06, 0xd682 },
{ 0x06, 0x9802 },
{ 0x06, 0x014f },
{ 0x06, 0xae09 },
{ 0x06, 0xbf82 },
{ 0x06, 0x98d6 },
{ 0x06, 0x82a0 },
{ 0x06, 0x0201 },
{ 0x06, 0x4fef },
{ 0x06, 0x95fe },
{ 0x06, 0xfdfc },
{ 0x06, 0x05f8 },
{ 0x06, 0xf9fa },
{ 0x06, 0xeef8 },
{ 0x06, 0xea00 },
{ 0x06, 0xeef8 },
{ 0x06, 0xeb00 },
{ 0x06, 0xe2f8 },
{ 0x06, 0x7ce3 },
{ 0x06, 0xf87d },
{ 0x06, 0xa511 },
{ 0x06, 0x1112 },
{ 0x06, 0xd240 },
{ 0x06, 0xd644 },
{ 0x06, 0x4402 },
{ 0x06, 0x8217 },
{ 0x06, 0xd2a0 },
{ 0x06, 0xd6aa },
{ 0x06, 0xaa02 },
{ 0x06, 0x8217 },
{ 0x06, 0xae0f },
{ 0x06, 0xa544 },
{ 0x06, 0x4402 },
{ 0x06, 0xae4d },
{ 0x06, 0xa5aa },
{ 0x06, 0xaa02 },
{ 0x06, 0xae47 },
{ 0x06, 0xaf82 },
{ 0x06, 0x13ee },
{ 0x06, 0x834e },
{ 0x06, 0x00ee },
{ 0x06, 0x834d },
{ 0x06, 0x0fee },
{ 0x06, 0x834c },
{ 0x06, 0x0fee },
{ 0x06, 0x834f },
{ 0x06, 0x00ee },
{ 0x06, 0x8351 },
{ 0x06, 0x00ee },
{ 0x06, 0x834a },
{ 0x06, 0xffee },
{ 0x06, 0x834b },
{ 0x06, 0xffe0 },
{ 0x06, 0x8330 },
{ 0x06, 0xe183 },
{ 0x06, 0x3158 },
{ 0x06, 0xfee4 },
{ 0x06, 0xf88a },
{ 0x06, 0xe5f8 },
{ 0x06, 0x8be0 },
{ 0x06, 0x8332 },
{ 0x06, 0xe183 },
{ 0x06, 0x3359 },
{ 0x06, 0x0fe2 },
{ 0x06, 0x834d },
{ 0x06, 0x0c24 },
{ 0x06, 0x5af0 },
{ 0x06, 0x1e12 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x8ce5 },
{ 0x06, 0xf88d },
{ 0x06, 0xaf82 },
{ 0x06, 0x13e0 },
{ 0x06, 0x834f },
{ 0x06, 0x10e4 },
{ 0x06, 0x834f },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x009f },
{ 0x06, 0x0ae0 },
{ 0x06, 0x834f },
{ 0x06, 0xa010 },
{ 0x06, 0xa5ee },
{ 0x06, 0x834e },
{ 0x06, 0x01e0 },
{ 0x06, 0x834e },
{ 0x06, 0x7805 },
{ 0x06, 0x9e9a },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x049e },
{ 0x06, 0x10e0 },
{ 0x06, 0x834e },
{ 0x06, 0x7803 },
{ 0x06, 0x9e0f },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x019e },
{ 0x06, 0x05ae },
{ 0x06, 0x0caf },
{ 0x06, 0x81f8 },
{ 0x06, 0xaf81 },
{ 0x06, 0xa3af },
{ 0x06, 0x81dc },
{ 0x06, 0xaf82 },
{ 0x06, 0x13ee },
{ 0x06, 0x8348 },
{ 0x06, 0x00ee },
{ 0x06, 0x8349 },
{ 0x06, 0x00e0 },
{ 0x06, 0x8351 },
{ 0x06, 0x10e4 },
{ 0x06, 0x8351 },
{ 0x06, 0x5801 },
{ 0x06, 0x9fea },
{ 0x06, 0xd000 },
{ 0x06, 0xd180 },
{ 0x06, 0x1f66 },
{ 0x06, 0xe2f8 },
{ 0x06, 0xeae3 },
{ 0x06, 0xf8eb },
{ 0x06, 0x5af8 },
{ 0x06, 0x1e20 },
{ 0x06, 0xe6f8 },
{ 0x06, 0xeae5 },
{ 0x06, 0xf8eb },
{ 0x06, 0xd302 },
{ 0x06, 0xb3fe },
{ 0x06, 0xe2f8 },
{ 0x06, 0x7cef },
{ 0x06, 0x325b },
{ 0x06, 0x80e3 },
{ 0x06, 0xf87d },
{ 0x06, 0x9e03 },
{ 0x06, 0x7dff },
{ 0x06, 0xff0d },
{ 0x06, 0x581c },
{ 0x06, 0x551a },
{ 0x06, 0x6511 },
{ 0x06, 0xa190 },
{ 0x06, 0xd3e2 },
{ 0x06, 0x8348 },
{ 0x06, 0xe383 },
{ 0x06, 0x491b },
{ 0x06, 0x56ab },
{ 0x06, 0x08ef },
{ 0x06, 0x56e6 },
{ 0x06, 0x8348 },
{ 0x06, 0xe783 },
{ 0x06, 0x4910 },
{ 0x06, 0xd180 },
{ 0x06, 0x1f66 },
{ 0x06, 0xa004 },
{ 0x06, 0xb9e2 },
{ 0x06, 0x8348 },
{ 0x06, 0xe383 },
{ 0x06, 0x49ef },
{ 0x06, 0x65e2 },
{ 0x06, 0x834a },
{ 0x06, 0xe383 },
{ 0x06, 0x4b1b },
{ 0x06, 0x56aa },
{ 0x06, 0x0eef },
{ 0x06, 0x56e6 },
{ 0x06, 0x834a },
{ 0x06, 0xe783 },
{ 0x06, 0x4be2 },
{ 0x06, 0x834d },
{ 0x06, 0xe683 },
{ 0x06, 0x4ce0 },
{ 0x06, 0x834d },
{ 0x06, 0xa000 },
{ 0x06, 0x0caf },
{ 0x06, 0x81dc },
{ 0x06, 0xe083 },
{ 0x06, 0x4d10 },
{ 0x06, 0xe483 },
{ 0x06, 0x4dae },
{ 0x06, 0x0480 },
{ 0x06, 0xe483 },
{ 0x06, 0x4de0 },
{ 0x06, 0x834e },
{ 0x06, 0x7803 },
{ 0x06, 0x9e0b },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x049e },
{ 0x06, 0x04ee },
{ 0x06, 0x834e },
{ 0x06, 0x02e0 },
{ 0x06, 0x8332 },
{ 0x06, 0xe183 },
{ 0x06, 0x3359 },
{ 0x06, 0x0fe2 },
{ 0x06, 0x834d },
{ 0x06, 0x0c24 },
{ 0x06, 0x5af0 },
{ 0x06, 0x1e12 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x8ce5 },
{ 0x06, 0xf88d },
{ 0x06, 0xe083 },
{ 0x06, 0x30e1 },
{ 0x06, 0x8331 },
{ 0x06, 0x6801 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x8ae5 },
{ 0x06, 0xf88b },
{ 0x06, 0xae37 },
{ 0x06, 0xee83 },
{ 0x06, 0x4e03 },
{ 0x06, 0xe083 },
{ 0x06, 0x4ce1 },
{ 0x06, 0x834d },
{ 0x06, 0x1b01 },
{ 0x06, 0x9e04 },
{ 0x06, 0xaaa1 },
{ 0x06, 0xaea8 },
{ 0x06, 0xee83 },
{ 0x06, 0x4e04 },
{ 0x06, 0xee83 },
{ 0x06, 0x4f00 },
{ 0x06, 0xaeab },
{ 0x06, 0xe083 },
{ 0x06, 0x4f78 },
{ 0x06, 0x039f },
{ 0x06, 0x14ee },
{ 0x06, 0x834e },
{ 0x06, 0x05d2 },
{ 0x06, 0x40d6 },
{ 0x06, 0x5554 },
{ 0x06, 0x0282 },
{ 0x06, 0x17d2 },
{ 0x06, 0xa0d6 },
{ 0x06, 0xba00 },
{ 0x06, 0x0282 },
{ 0x06, 0x17fe },
{ 0x06, 0xfdfc },
{ 0x06, 0x05f8 },
{ 0x06, 0xe0f8 },
{ 0x06, 0x60e1 },
{ 0x06, 0xf861 },
{ 0x06, 0x6802 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x60e5 },
{ 0x06, 0xf861 },
{ 0x06, 0xe0f8 },
{ 0x06, 0x48e1 },
{ 0x06, 0xf849 },
{ 0x06, 0x580f },
{ 0x06, 0x1e02 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x48e5 },
{ 0x06, 0xf849 },
{ 0x06, 0xd000 },
{ 0x06, 0x0282 },
{ 0x06, 0x5bbf },
{ 0x06, 0x8350 },
{ 0x06, 0xef46 },
{ 0x06, 0xdc19 },
{ 0x06, 0xddd0 },
{ 0x06, 0x0102 },
{ 0x06, 0x825b },
{ 0x06, 0x0282 },
{ 0x06, 0x77e0 },
{ 0x06, 0xf860 },
{ 0x06, 0xe1f8 },
{ 0x06, 0x6158 },
{ 0x06, 0xfde4 },
{ 0x06, 0xf860 },
{ 0x06, 0xe5f8 },
{ 0x06, 0x61fc },
{ 0x06, 0x04f9 },
{ 0x06, 0xfafb },
{ 0x06, 0xc6bf },
{ 0x06, 0xf840 },
{ 0x06, 0xbe83 },
{ 0x06, 0x50a0 },
{ 0x06, 0x0101 },
{ 0x06, 0x071b },
{ 0x06, 0x89cf },
{ 0x06, 0xd208 },
{ 0x06, 0xebdb },
{ 0x06, 0x19b2 },
{ 0x06, 0xfbff },
{ 0x06, 0xfefd },
{ 0x06, 0x04f8 },
{ 0x06, 0xe0f8 },
{ 0x06, 0x48e1 },
{ 0x06, 0xf849 },
{ 0x06, 0x6808 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x48e5 },
{ 0x06, 0xf849 },
{ 0x06, 0x58f7 },
{ 0x06, 0xe4f8 },
{ 0x06, 0x48e5 },
{ 0x06, 0xf849 },
{ 0x06, 0xfc04 },
{ 0x06, 0x4d20 },
{ 0x06, 0x0002 },
{ 0x06, 0x4e22 },
{ 0x06, 0x0002 },
{ 0x06, 0x4ddf },
{ 0x06, 0xff01 },
{ 0x06, 0x4edd },
{ 0x06, 0xff01 },
{ 0x05, 0x83d4 },
{ 0x06, 0x8000 },
{ 0x05, 0x83d8 },
{ 0x06, 0x8051 },
{ 0x02, 0x6010 },
{ 0x03, 0xdc00 },
{ 0x05, 0xfff6 },
{ 0x06, 0x00fc },
{ 0x1f, 0x0000 },
{ 0x14, 0x0060 },
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 },
{ 0x1f, 0x0000 }
};
rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
mdio_write(ioaddr, 0x1f, 0x0002);
mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);
rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
{ 0x1f, 0x0005 },
{ 0x05, 0x8330 },
{ 0x06, 0x669a },
{ 0x1f, 0x0002 }
};
int val;
rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
val = mdio_read(ioaddr, 0x0d);
if ((val & 0x00ff) != 0x006c) {
u32 set[] = {
0x0065, 0x0066, 0x0067, 0x0068,
0x0069, 0x006a, 0x006b, 0x006c
};
int i;
mdio_write(ioaddr, 0x1f, 0x0002);
val &= 0xff00;
for (i = 0; i < ARRAY_SIZE(set); i++)
mdio_write(ioaddr, 0x0d, val | set[i]);
}
} else {
struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x6662 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8330 },
{ 0x06, 0x6662 }
};
rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
mdio_write(ioaddr, 0x1f, 0x0002);
mdio_patch(ioaddr, 0x0d, 0x0300);
mdio_patch(ioaddr, 0x0f, 0x0010);
mdio_write(ioaddr, 0x1f, 0x0002);
mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2));
}
static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
{
static struct phy_reg phy_reg_init_0[] = {
{ 0x1f, 0x0001 },
{ 0x06, 0x4064 },
{ 0x07, 0x2863 },
{ 0x08, 0x059c },
{ 0x09, 0x26b4 },
{ 0x0a, 0x6a19 },
{ 0x0b, 0xdcc8 },
{ 0x10, 0xf06d },
{ 0x14, 0x7f68 },
{ 0x18, 0x7fd9 },
{ 0x1c, 0xf0ff },
{ 0x1d, 0x3d9c },
{ 0x1f, 0x0003 },
{ 0x12, 0xf49f },
{ 0x13, 0x070b },
{ 0x1a, 0x05ad },
{ 0x14, 0x94c0 },
{ 0x1f, 0x0002 },
{ 0x06, 0x5561 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8332 },
{ 0x06, 0x5561 }
};
static struct phy_reg phy_reg_init_1[] = {
{ 0x1f, 0x0005 },
{ 0x05, 0xffc2 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8000 },
{ 0x06, 0xf8f9 },
{ 0x06, 0xfaee },
{ 0x06, 0xf8ea },
{ 0x06, 0x00ee },
{ 0x06, 0xf8eb },
{ 0x06, 0x00e2 },
{ 0x06, 0xf87c },
{ 0x06, 0xe3f8 },
{ 0x06, 0x7da5 },
{ 0x06, 0x1111 },
{ 0x06, 0x12d2 },
{ 0x06, 0x40d6 },
{ 0x06, 0x4444 },
{ 0x06, 0x0281 },
{ 0x06, 0xc6d2 },
{ 0x06, 0xa0d6 },
{ 0x06, 0xaaaa },
{ 0x06, 0x0281 },
{ 0x06, 0xc6ae },
{ 0x06, 0x0fa5 },
{ 0x06, 0x4444 },
{ 0x06, 0x02ae },
{ 0x06, 0x4da5 },
{ 0x06, 0xaaaa },
{ 0x06, 0x02ae },
{ 0x06, 0x47af },
{ 0x06, 0x81c2 },
{ 0x06, 0xee83 },
{ 0x06, 0x4e00 },
{ 0x06, 0xee83 },
{ 0x06, 0x4d0f },
{ 0x06, 0xee83 },
{ 0x06, 0x4c0f },
{ 0x06, 0xee83 },
{ 0x06, 0x4f00 },
{ 0x06, 0xee83 },
{ 0x06, 0x5100 },
{ 0x06, 0xee83 },
{ 0x06, 0x4aff },
{ 0x06, 0xee83 },
{ 0x06, 0x4bff },
{ 0x06, 0xe083 },
{ 0x06, 0x30e1 },
{ 0x06, 0x8331 },
{ 0x06, 0x58fe },
{ 0x06, 0xe4f8 },
{ 0x06, 0x8ae5 },
{ 0x06, 0xf88b },
{ 0x06, 0xe083 },
{ 0x06, 0x32e1 },
{ 0x06, 0x8333 },
{ 0x06, 0x590f },
{ 0x06, 0xe283 },
{ 0x06, 0x4d0c },
{ 0x06, 0x245a },
{ 0x06, 0xf01e },
{ 0x06, 0x12e4 },
{ 0x06, 0xf88c },
{ 0x06, 0xe5f8 },
{ 0x06, 0x8daf },
{ 0x06, 0x81c2 },
{ 0x06, 0xe083 },
{ 0x06, 0x4f10 },
{ 0x06, 0xe483 },
{ 0x06, 0x4fe0 },
{ 0x06, 0x834e },
{ 0x06, 0x7800 },
{ 0x06, 0x9f0a },
{ 0x06, 0xe083 },
{ 0x06, 0x4fa0 },
{ 0x06, 0x10a5 },
{ 0x06, 0xee83 },
{ 0x06, 0x4e01 },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x059e },
{ 0x06, 0x9ae0 },
{ 0x06, 0x834e },
{ 0x06, 0x7804 },
{ 0x06, 0x9e10 },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x039e },
{ 0x06, 0x0fe0 },
{ 0x06, 0x834e },
{ 0x06, 0x7801 },
{ 0x06, 0x9e05 },
{ 0x06, 0xae0c },
{ 0x06, 0xaf81 },
{ 0x06, 0xa7af },
{ 0x06, 0x8152 },
{ 0x06, 0xaf81 },
{ 0x06, 0x8baf },
{ 0x06, 0x81c2 },
{ 0x06, 0xee83 },
{ 0x06, 0x4800 },
{ 0x06, 0xee83 },
{ 0x06, 0x4900 },
{ 0x06, 0xe083 },
{ 0x06, 0x5110 },
{ 0x06, 0xe483 },
{ 0x06, 0x5158 },
{ 0x06, 0x019f },
{ 0x06, 0xead0 },
{ 0x06, 0x00d1 },
{ 0x06, 0x801f },
{ 0x06, 0x66e2 },
{ 0x06, 0xf8ea },
{ 0x06, 0xe3f8 },
{ 0x06, 0xeb5a },
{ 0x06, 0xf81e },
{ 0x06, 0x20e6 },
{ 0x06, 0xf8ea },
{ 0x06, 0xe5f8 },
{ 0x06, 0xebd3 },
{ 0x06, 0x02b3 },
{ 0x06, 0xfee2 },
{ 0x06, 0xf87c },
{ 0x06, 0xef32 },
{ 0x06, 0x5b80 },
{ 0x06, 0xe3f8 },
{ 0x06, 0x7d9e },
{ 0x06, 0x037d },
{ 0x06, 0xffff },
{ 0x06, 0x0d58 },
{ 0x06, 0x1c55 },
{ 0x06, 0x1a65 },
{ 0x06, 0x11a1 },
{ 0x06, 0x90d3 },
{ 0x06, 0xe283 },
{ 0x06, 0x48e3 },
{ 0x06, 0x8349 },
{ 0x06, 0x1b56 },
{ 0x06, 0xab08 },
{ 0x06, 0xef56 },
{ 0x06, 0xe683 },
{ 0x06, 0x48e7 },
{ 0x06, 0x8349 },
{ 0x06, 0x10d1 },
{ 0x06, 0x801f },
{ 0x06, 0x66a0 },
{ 0x06, 0x04b9 },
{ 0x06, 0xe283 },
{ 0x06, 0x48e3 },
{ 0x06, 0x8349 },
{ 0x06, 0xef65 },
{ 0x06, 0xe283 },
{ 0x06, 0x4ae3 },
{ 0x06, 0x834b },
{ 0x06, 0x1b56 },
{ 0x06, 0xaa0e },
{ 0x06, 0xef56 },
{ 0x06, 0xe683 },
{ 0x06, 0x4ae7 },
{ 0x06, 0x834b },
{ 0x06, 0xe283 },
{ 0x06, 0x4de6 },
{ 0x06, 0x834c },
{ 0x06, 0xe083 },
{ 0x06, 0x4da0 },
{ 0x06, 0x000c },
{ 0x06, 0xaf81 },
{ 0x06, 0x8be0 },
{ 0x06, 0x834d },
{ 0x06, 0x10e4 },
{ 0x06, 0x834d },
{ 0x06, 0xae04 },
{ 0x06, 0x80e4 },
{ 0x06, 0x834d },
{ 0x06, 0xe083 },
{ 0x06, 0x4e78 },
{ 0x06, 0x039e },
{ 0x06, 0x0be0 },
{ 0x06, 0x834e },
{ 0x06, 0x7804 },
{ 0x06, 0x9e04 },
{ 0x06, 0xee83 },
{ 0x06, 0x4e02 },
{ 0x06, 0xe083 },
{ 0x06, 0x32e1 },
{ 0x06, 0x8333 },
{ 0x06, 0x590f },
{ 0x06, 0xe283 },
{ 0x06, 0x4d0c },
{ 0x06, 0x245a },
{ 0x06, 0xf01e },
{ 0x06, 0x12e4 },
{ 0x06, 0xf88c },
{ 0x06, 0xe5f8 },
{ 0x06, 0x8de0 },
{ 0x06, 0x8330 },
{ 0x06, 0xe183 },
{ 0x06, 0x3168 },
{ 0x06, 0x01e4 },
{ 0x06, 0xf88a },
{ 0x06, 0xe5f8 },
{ 0x06, 0x8bae },
{ 0x06, 0x37ee },
{ 0x06, 0x834e },
{ 0x06, 0x03e0 },
{ 0x06, 0x834c },
{ 0x06, 0xe183 },
{ 0x06, 0x4d1b },
{ 0x06, 0x019e },
{ 0x06, 0x04aa },
{ 0x06, 0xa1ae },
{ 0x06, 0xa8ee },
{ 0x06, 0x834e },
{ 0x06, 0x04ee },
{ 0x06, 0x834f },
{ 0x06, 0x00ae },
{ 0x06, 0xabe0 },
{ 0x06, 0x834f },
{ 0x06, 0x7803 },
{ 0x06, 0x9f14 },
{ 0x06, 0xee83 },
{ 0x06, 0x4e05 },
{ 0x06, 0xd240 },
{ 0x06, 0xd655 },
{ 0x06, 0x5402 },
{ 0x06, 0x81c6 },
{ 0x06, 0xd2a0 },
{ 0x06, 0xd6ba },
{ 0x06, 0x0002 },
{ 0x06, 0x81c6 },
{ 0x06, 0xfefd },
{ 0x06, 0xfc05 },
{ 0x06, 0xf8e0 },
{ 0x06, 0xf860 },
{ 0x06, 0xe1f8 },
{ 0x06, 0x6168 },
{ 0x06, 0x02e4 },
{ 0x06, 0xf860 },
{ 0x06, 0xe5f8 },
{ 0x06, 0x61e0 },
{ 0x06, 0xf848 },
{ 0x06, 0xe1f8 },
{ 0x06, 0x4958 },
{ 0x06, 0x0f1e },
{ 0x06, 0x02e4 },
{ 0x06, 0xf848 },
{ 0x06, 0xe5f8 },
{ 0x06, 0x49d0 },
{ 0x06, 0x0002 },
{ 0x06, 0x820a },
{ 0x06, 0xbf83 },
{ 0x06, 0x50ef },
{ 0x06, 0x46dc },
{ 0x06, 0x19dd },
{ 0x06, 0xd001 },
{ 0x06, 0x0282 },
{ 0x06, 0x0a02 },
{ 0x06, 0x8226 },
{ 0x06, 0xe0f8 },
{ 0x06, 0x60e1 },
{ 0x06, 0xf861 },
{ 0x06, 0x58fd },
{ 0x06, 0xe4f8 },
{ 0x06, 0x60e5 },
{ 0x06, 0xf861 },
{ 0x06, 0xfc04 },
{ 0x06, 0xf9fa },
{ 0x06, 0xfbc6 },
{ 0x06, 0xbff8 },
{ 0x06, 0x40be },
{ 0x06, 0x8350 },
{ 0x06, 0xa001 },
{ 0x06, 0x0107 },
{ 0x06, 0x1b89 },
{ 0x06, 0xcfd2 },
{ 0x06, 0x08eb },
{ 0x06, 0xdb19 },
{ 0x06, 0xb2fb },
{ 0x06, 0xfffe },
{ 0x06, 0xfd04 },
{ 0x06, 0xf8e0 },
{ 0x06, 0xf848 },
{ 0x06, 0xe1f8 },
{ 0x06, 0x4968 },
{ 0x06, 0x08e4 },
{ 0x06, 0xf848 },
{ 0x06, 0xe5f8 },
{ 0x06, 0x4958 },
{ 0x06, 0xf7e4 },
{ 0x06, 0xf848 },
{ 0x06, 0xe5f8 },
{ 0x06, 0x49fc },
{ 0x06, 0x044d },
{ 0x06, 0x2000 },
{ 0x06, 0x024e },
{ 0x06, 0x2200 },
{ 0x06, 0x024d },
{ 0x06, 0xdfff },
{ 0x06, 0x014e },
{ 0x06, 0xddff },
{ 0x06, 0x0100 },
{ 0x05, 0x83d8 },
{ 0x06, 0x8000 },
{ 0x03, 0xdc00 },
{ 0x05, 0xfff6 },
{ 0x06, 0x00fc },
{ 0x1f, 0x0000 },
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 },
{ 0x1f, 0x0000 }
}; };
rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
{ 0x1f, 0x0005 },
{ 0x05, 0x8330 },
{ 0x06, 0x669a },
{ 0x11, 0x401c },
{ 0x1f, 0x0002 }
};
int val;
rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
val = mdio_read(ioaddr, 0x0d);
if ((val & 0x00ff) != 0x006c) {
u32 set[] = {
0x0065, 0x0066, 0x0067, 0x0068,
0x0069, 0x006a, 0x006b, 0x006c
};
int i;
mdio_write(ioaddr, 0x1f, 0x0002);
val &= 0xff00;
for (i = 0; i < ARRAY_SIZE(set); i++)
mdio_write(ioaddr, 0x0d, val | set[i]);
}
} else {
struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x2642 },
{ 0x1f, 0x0005 },
{ 0x05, 0x8330 },
{ 0x06, 0x2642 }
{ 0x06, 0x087d },
{ 0x05, 0x8300 },
{ 0x06, 0x0101 },
{ 0x06, 0x05f8 },
{ 0x06, 0xf9fa },
{ 0x06, 0xfbef },
{ 0x06, 0x79e2 },
{ 0x06, 0x835f },
{ 0x06, 0xe0f8 },
{ 0x06, 0x9ae1 },
{ 0x06, 0xf89b },
{ 0x06, 0xef31 },
{ 0x06, 0x3b65 },
{ 0x06, 0xaa07 },
{ 0x06, 0x81e4 },
{ 0x06, 0xf89a },
{ 0x06, 0xe5f8 },
{ 0x06, 0x9baf },
{ 0x06, 0x06ae },
{ 0x05, 0x83dc },
{ 0x06, 0x8300 },
}; };
rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
ARRAY_SIZE(phy_reg_init_1));
}
mdio_write(ioaddr, 0x1f, 0x0002);
mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
mdio_write(ioaddr, 0x1f, 0x0001);
mdio_write(ioaddr, 0x17, 0x0cc0);
mdio_write(ioaddr, 0x1f, 0x0002);
mdio_patch(ioaddr, 0x0f, 0x0017);
rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
}
static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
{
struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x10, 0x0008 },
{ 0x0d, 0x006c },
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 },
{ 0x1f, 0x0001 },
{ 0x17, 0x0cc0 },
{ 0x1f, 0x0001 },
{ 0x0b, 0xa4d8 },
{ 0x09, 0x281c },
{ 0x07, 0x2883 },
{ 0x0a, 0x6b35 },
{ 0x1d, 0x3da4 },
{ 0x1c, 0xeffd },
{ 0x14, 0x7f52 },
{ 0x18, 0x7fc6 },
{ 0x08, 0x0601 },
{ 0x06, 0x4063 },
{ 0x10, 0xf074 },
{ 0x1f, 0x0003 },
{ 0x13, 0x0789 },
{ 0x12, 0xf4bd },
{ 0x1a, 0x04fd },
{ 0x14, 0x84b0 },
{ 0x1f, 0x0000 },
{ 0x00, 0x9200 },
{ 0x1f, 0x0005 },
{ 0x01, 0x0340 },
{ 0x1f, 0x0001 },
{ 0x04, 0x4000 },
{ 0x03, 0x1d21 },
{ 0x02, 0x0c32 },
{ 0x01, 0x0200 },
{ 0x00, 0x5554 },
{ 0x04, 0x4800 },
{ 0x04, 0x4000 },
{ 0x04, 0xf000 },
{ 0x03, 0xdf01 },
{ 0x02, 0xdf20 },
{ 0x01, 0x101a },
{ 0x00, 0xa0ff },
{ 0x04, 0xf800 },
{ 0x04, 0xf000 },
{ 0x1f, 0x0000 },
{ 0x1f, 0x0007 },
{ 0x1e, 0x0023 },
{ 0x16, 0x0000 },
{ 0x1f, 0x0000 }
};
rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
...@@ -1792,7 +2661,13 @@ static void rtl_hw_phy_config(struct net_device *dev)
rtl8168cp_2_hw_phy_config(ioaddr);
break;
case RTL_GIGA_MAC_VER_25:
rtl8168d_1_hw_phy_config(ioaddr);
break;
case RTL_GIGA_MAC_VER_26:
rtl8168d_2_hw_phy_config(ioaddr);
break;
case RTL_GIGA_MAC_VER_27:
rtl8168d_3_hw_phy_config(ioaddr);
break;
default:
...@@ -2863,6 +3738,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
break;
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_27:
rtl_hw_start_8168d(ioaddr, pdev);
break;
......
...@@ -62,8 +62,11 @@ static char *devid=NULL;
static struct usb_eth_dev usb_dev_id[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
{.name = pn, .vendor = vid, .device = pid, .private = flags},
#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
PEGASUS_DEV(pn, vid, pid, flags)
#include "pegasus.h"
#undef PEGASUS_DEV
#undef PEGASUS_DEV_CLASS
{NULL, 0, 0, 0},
{NULL, 0, 0, 0}
};
...@@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = {
static struct usb_device_id pegasus_ids[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
{.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid},
/*
* The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product
* IDs as the Belkin F5D5050, so we need to teach the pegasus driver to
* ignore adaptors belonging to the "Wireless" class 0xE0. For this one
* case anyway, seeing as the pegasus is for "Wired" adaptors.
*/
#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
{.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \
.idVendor = vid, .idProduct = pid, .bDeviceClass = dclass},
#include "pegasus.h"
#undef PEGASUS_DEV
#undef PEGASUS_DEV_CLASS
{},
{}
};
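/*
 * Illustrative expansion, not part of the patch: for the Belkin entry in
 * pegasus.h, PEGASUS_DEV_CLASS("Belkin F5D5050 USB Ethernet",
 * VENDOR_BELKIN, 0x0121, 0x00, ...) generates a pegasus_ids element
 * equivalent to
 *
 *	{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
 *			 USB_DEVICE_ID_MATCH_DEV_CLASS,
 *	  .idVendor = VENDOR_BELKIN, .idProduct = 0x0121,
 *	  .bDeviceClass = 0x00 },
 *
 * so the wired (class 0x00) adaptor still binds while the class 0xE0
 * bluetooth adaptor with the same IDs is skipped.
 */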
......
...@@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
DEFAULT_GPIO_RESET | PEGASUS_II )
/*
* Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
* with the same product IDs by checking the device class too.
*/
PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
DEFAULT_GPIO_RESET )
......
################################################################################
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License and no later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
# NON INFRINGEMENT. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
#
#
################################################################################
#
# Makefile for the VMware vmxnet3 ethernet NIC driver
#
obj-$(CONFIG_VMXNET3) += vmxnet3.o
vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
#ifndef _UPT1_DEFS_H
#define _UPT1_DEFS_H
struct UPT1_TxStats {
u64 TSOPktsTxOK; /* TSO pkts post-segmentation */
u64 TSOBytesTxOK;
u64 ucastPktsTxOK;
u64 ucastBytesTxOK;
u64 mcastPktsTxOK;
u64 mcastBytesTxOK;
u64 bcastPktsTxOK;
u64 bcastBytesTxOK;
u64 pktsTxError;
u64 pktsTxDiscard;
};
struct UPT1_RxStats {
u64 LROPktsRxOK; /* LRO pkts */
u64 LROBytesRxOK; /* bytes from LRO pkts */
/* the following counters are for pkts from the wire, i.e., pre-LRO */
u64 ucastPktsRxOK;
u64 ucastBytesRxOK;
u64 mcastPktsRxOK;
u64 mcastBytesRxOK;
u64 bcastPktsRxOK;
u64 bcastBytesRxOK;
u64 pktsRxOutOfBuf;
u64 pktsRxError;
};
/* interrupt moderation level */
enum {
UPT1_IML_NONE = 0, /* no interrupt moderation */
UPT1_IML_HIGHEST = 7, /* least intr generated */
UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */
};
/* values for UPT1_RSSConf.hashFunc */
enum {
UPT1_RSS_HASH_TYPE_NONE = 0x0,
UPT1_RSS_HASH_TYPE_IPV4 = 0x01,
UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02,
UPT1_RSS_HASH_TYPE_IPV6 = 0x04,
UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08,
};
enum {
UPT1_RSS_HASH_FUNC_NONE = 0x0,
UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01,
};
#define UPT1_RSS_MAX_KEY_SIZE 40
#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
struct UPT1_RSSConf {
u16 hashType;
u16 hashFunc;
u16 hashKeySize;
u16 indTableSize;
u8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
};
/* features */
enum {
UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
UPT1_F_RSS = 0x0002,
UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
UPT1_F_LRO = 0x0008,
};
#endif
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
#ifndef _VMXNET3_DEFS_H_
#define _VMXNET3_DEFS_H_
#include "upt1_defs.h"
/* all registers are 32 bit wide */
/* BAR 1 */
enum {
VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */
VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */
VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
VMXNET3_REG_CMD = 0x20, /* Command */
VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
VMXNET3_REG_MACH = 0x30, /* MAC Address High */
VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
};
/* BAR 0 */
enum {
VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */
VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
};
#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
#define VMXNET3_REG_ALIGN_MASK 0x7
/* I/O Mapped access to registers */
#define VMXNET3_IO_TYPE_PT 0
#define VMXNET3_IO_TYPE_VD 1
#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
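/*
 * Worked example (reader's note, not part of the patch): VMXNET3_IO_ADDR()
 * keeps the register space selector in the top byte, so
 * VMXNET3_IO_ADDR(VMXNET3_IO_TYPE_VD, VMXNET3_REG_CMD) == 0x01000020, and
 * VMXNET3_IO_TYPE()/VMXNET3_IO_REG() recover 1 and 0x20 from it.
 */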
enum {
VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
VMXNET3_CMD_QUIESCE_DEV,
VMXNET3_CMD_RESET_DEV,
VMXNET3_CMD_UPDATE_RX_MODE,
VMXNET3_CMD_UPDATE_MAC_FILTERS,
VMXNET3_CMD_UPDATE_VLAN_FILTERS,
VMXNET3_CMD_UPDATE_RSSIDT,
VMXNET3_CMD_UPDATE_IML,
VMXNET3_CMD_UPDATE_PMCFG,
VMXNET3_CMD_UPDATE_FEATURE,
VMXNET3_CMD_LOAD_PLUGIN,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
VMXNET3_CMD_GET_STATS,
VMXNET3_CMD_GET_LINK,
VMXNET3_CMD_GET_PERM_MAC_LO,
VMXNET3_CMD_GET_PERM_MAC_HI,
VMXNET3_CMD_GET_DID_LO,
VMXNET3_CMD_GET_DID_HI,
VMXNET3_CMD_GET_DEV_EXTRA_INFO,
VMXNET3_CMD_GET_CONF_INTR
};
struct Vmxnet3_TxDesc {
u64 addr;
u32 len:14;
u32 gen:1; /* generation bit */
u32 rsvd:1;
u32 dtype:1; /* descriptor type */
u32 ext1:1;
u32 msscof:14; /* MSS, checksum offset, flags */
u32 hlen:10; /* header len */
u32 om:2; /* offload mode */
u32 eop:1; /* End Of Packet */
u32 cq:1; /* completion request */
u32 ext2:1;
u32 ti:1; /* VLAN Tag Insertion */
u32 tci:16; /* Tag to Insert */
};
/* TxDesc.OM values */
#define VMXNET3_OM_NONE 0
#define VMXNET3_OM_CSUM 2
#define VMXNET3_OM_TSO 3
/* fields in TxDesc we access w/o using bit fields */
#define VMXNET3_TXD_EOP_SHIFT 12
#define VMXNET3_TXD_CQ_SHIFT 13
#define VMXNET3_TXD_GEN_SHIFT 14
#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
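/*
 * Reader's note, not part of the patch: gen sits at bit 14 of the TxDesc
 * word that holds len (len:14, gen:1), while eop and cq sit at bits 12 and
 * 13 of the word that holds hlen/om, so e.g.
 * VMXNET3_TXD_EOP | VMXNET3_TXD_CQ == 0x3000.
 */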
#define VMXNET3_HDR_COPY_SIZE 128
struct Vmxnet3_TxDataDesc {
u8 data[VMXNET3_HDR_COPY_SIZE];
};
struct Vmxnet3_TxCompDesc {
u32 txdIdx:12; /* Index of the EOP TxDesc */
u32 ext1:20;
u32 ext2;
u32 ext3;
u32 rsvd:24;
u32 type:7; /* completion type */
u32 gen:1; /* generation bit */
};
struct Vmxnet3_RxDesc {
u64 addr;
u32 len:14;
u32 btype:1; /* Buffer Type */
u32 dtype:1; /* Descriptor type */
u32 rsvd:15;
u32 gen:1; /* Generation bit */
u32 ext1;
};
/* values of RXD.BTYPE */
#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
/* fields in RxDesc we access w/o using bit fields */
#define VMXNET3_RXD_BTYPE_SHIFT 14
#define VMXNET3_RXD_GEN_SHIFT 31
struct Vmxnet3_RxCompDesc {
u32 rxdIdx:12; /* Index of the RxDesc */
u32 ext1:2;
u32 eop:1; /* End of Packet */
u32 sop:1; /* Start of Packet */
u32 rqID:10; /* rx queue/ring ID */
u32 rssType:4; /* RSS hash type used */
u32 cnc:1; /* Checksum Not Calculated */
u32 ext2:1;
u32 rssHash; /* RSS hash value */
u32 len:14; /* data length */
u32 err:1; /* Error */
u32 ts:1; /* Tag is stripped */
u32 tci:16; /* Tag stripped */
u32 csum:16;
u32 tuc:1; /* TCP/UDP Checksum Correct */
u32 udp:1; /* UDP packet */
u32 tcp:1; /* TCP packet */
u32 ipc:1; /* IP Checksum Correct */
u32 v6:1; /* IPv6 */
u32 v4:1; /* IPv4 */
u32 frg:1; /* IP Fragment */
u32 fcs:1; /* Frame CRC correct */
u32 type:7; /* completion type */
u32 gen:1; /* generation bit */
};
/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
#define VMXNET3_RCD_TUC_SHIFT 16
#define VMXNET3_RCD_IPC_SHIFT 19
/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
#define VMXNET3_RCD_TYPE_SHIFT 56
#define VMXNET3_RCD_GEN_SHIFT 63
/* csum OK for TCP/UDP pkts over IP */
#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
1 << VMXNET3_RCD_IPC_SHIFT)
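/*
 * Worked example (reader's note): VMXNET3_RCD_CSUM_OK ==
 * (1 << 16) | (1 << 19) == 0x90000, i.e. both tuc (TCP/UDP checksum
 * correct) and ipc (IP checksum correct) set in dword[3] of the
 * completion descriptor.
 */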
/* value of RxCompDesc.rssType */
enum {
VMXNET3_RCD_RSS_TYPE_NONE = 0,
VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
};
/* a union for accessing all cmd/completion descriptors */
union Vmxnet3_GenericDesc {
u64 qword[2];
u32 dword[4];
u16 word[8];
struct Vmxnet3_TxDesc txd;
struct Vmxnet3_RxDesc rxd;
struct Vmxnet3_TxCompDesc tcd;
struct Vmxnet3_RxCompDesc rcd;
};
#define VMXNET3_INIT_GEN 1
/* Max size of a single tx buffer */
#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
/* # of tx desc needed for a tx buffer size */
#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
VMXNET3_MAX_TX_BUF_SIZE)
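/*
 * Worked example (reader's note): with VMXNET3_MAX_TX_BUF_SIZE == 16384,
 * VMXNET3_TXD_NEEDED(1514) == 1 while VMXNET3_TXD_NEEDED(65536) == 4, so
 * an MTU-sized frame fits in one descriptor and a 64KB TSO buffer needs
 * four.
 */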
/* max # of tx descs for a non-tso pkt */
#define VMXNET3_MAX_TXD_PER_PKT 16
/* Max size of a single rx buffer */
#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
/* Minimum size of a type 0 buffer */
#define VMXNET3_MIN_T0_BUF_SIZE 128
#define VMXNET3_MAX_CSUM_OFFSET 1024
/* Ring base address alignment */
#define VMXNET3_RING_BA_ALIGN 512
#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
/* Ring size must be a multiple of 32 */
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
/* a list of reasons for queue stop */
enum {
VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */
VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */
VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */
VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */
VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */
VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */
VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */
VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */
};
/* completion descriptor types */
#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
enum {
VMXNET3_GOS_BITS_UNK = 0, /* unknown */
VMXNET3_GOS_BITS_32 = 1,
VMXNET3_GOS_BITS_64 = 2,
};
#define VMXNET3_GOS_TYPE_LINUX 1
struct Vmxnet3_GOSInfo {
u32 gosBits:2; /* 32-bit or 64-bit? */
u32 gosType:4; /* which guest */
u32 gosVer:16; /* gos version */
u32 gosMisc:10; /* other info about gos */
};
struct Vmxnet3_DriverInfo {
u32 version;
struct Vmxnet3_GOSInfo gos;
u32 vmxnet3RevSpt;
u32 uptVerSpt;
};
#define VMXNET3_REV1_MAGIC 0xbabefee1
/*
* QueueDescPA must be 128 bytes aligned. It points to an array of
* Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
* The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
* Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
*/
#define VMXNET3_QUEUE_DESC_ALIGN 128
struct Vmxnet3_MiscConf {
struct Vmxnet3_DriverInfo driverInfo;
u64 uptFeatures;
u64 ddPA; /* driver data PA */
u64 queueDescPA; /* queue descriptor table PA */
u32 ddLen; /* driver data len */
u32 queueDescLen; /* queue desc. table len in bytes */
u32 mtu;
u16 maxNumRxSG;
u8 numTxQueues;
u8 numRxQueues;
u32 reserved[4];
};
struct Vmxnet3_TxQueueConf {
u64 txRingBasePA;
u64 dataRingBasePA;
u64 compRingBasePA;
u64 ddPA; /* driver data */
u64 reserved;
u32 txRingSize; /* # of tx desc */
u32 dataRingSize; /* # of data desc */
u32 compRingSize; /* # of comp desc */
u32 ddLen; /* size of driver data */
u8 intrIdx;
u8 _pad[7];
};
struct Vmxnet3_RxQueueConf {
u64 rxRingBasePA[2];
u64 compRingBasePA;
u64 ddPA; /* driver data */
u64 reserved;
u32 rxRingSize[2]; /* # of rx desc */
u32 compRingSize; /* # of rx comp desc */
u32 ddLen; /* size of driver data */
u8 intrIdx;
u8 _pad[7];
};
enum vmxnet3_intr_mask_mode {
VMXNET3_IMM_AUTO = 0,
VMXNET3_IMM_ACTIVE = 1,
VMXNET3_IMM_LAZY = 2
};
enum vmxnet3_intr_type {
VMXNET3_IT_AUTO = 0,
VMXNET3_IT_INTX = 1,
VMXNET3_IT_MSI = 2,
VMXNET3_IT_MSIX = 3
};
#define VMXNET3_MAX_TX_QUEUES 8
#define VMXNET3_MAX_RX_QUEUES 16
/* addition 1 for events */
#define VMXNET3_MAX_INTRS 25
struct Vmxnet3_IntrConf {
bool autoMask;
u8 numIntrs; /* # of interrupts */
u8 eventIntrIdx;
u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
* each intr */
u32 reserved[3];
};
/* one bit per VLAN ID, the size is in the units of u32 */
#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
struct Vmxnet3_QueueStatus {
bool stopped;
u8 _pad[3];
u32 error;
};
struct Vmxnet3_TxQueueCtrl {
u32 txNumDeferred;
u32 txThreshold;
u64 reserved;
};
struct Vmxnet3_RxQueueCtrl {
bool updateRxProd;
u8 _pad[7];
u64 reserved;
};
enum {
VMXNET3_RXM_UCAST = 0x01, /* unicast only */
VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */
VMXNET3_RXM_BCAST = 0x04, /* broadcast only */
VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */
VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */
};
struct Vmxnet3_RxFilterConf {
u32 rxMode; /* VMXNET3_RXM_xxx */
u16 mfTableLen; /* size of the multicast filter table */
u16 _pad1;
u64 mfTablePA; /* PA of the multicast filters table */
u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
};
#define VMXNET3_PM_MAX_FILTERS 6
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
* filters */
struct Vmxnet3_PM_PktFilter {
u8 maskSize;
u8 patternSize;
u8 mask[VMXNET3_PM_MAX_MASK_SIZE];
u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
u8 pad[6];
};
struct Vmxnet3_PMConf {
u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
u8 numFilters;
u8 pad[5];
struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
};
struct Vmxnet3_VariableLenConfDesc {
u32 confVer;
u32 confLen;
u64 confPA;
};
struct Vmxnet3_TxQueueDesc {
struct Vmxnet3_TxQueueCtrl ctrl;
struct Vmxnet3_TxQueueConf conf;
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_TxStats stats;
u8 _pad[88]; /* 128 aligned */
};
struct Vmxnet3_RxQueueDesc {
struct Vmxnet3_RxQueueCtrl ctrl;
struct Vmxnet3_RxQueueConf conf;
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_RxStats stats;
u8 __pad[88]; /* 128 aligned */
};
struct Vmxnet3_DSDevRead {
/* read-only region for device, read by dev in response to a SET cmd */
struct Vmxnet3_MiscConf misc;
struct Vmxnet3_IntrConf intrConf;
struct Vmxnet3_RxFilterConf rxFilterConf;
struct Vmxnet3_VariableLenConfDesc rssConfDesc;
struct Vmxnet3_VariableLenConfDesc pmConfDesc;
struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
};
/* All structures in DriverShared are padded to multiples of 8 bytes */
struct Vmxnet3_DriverShared {
u32 magic;
/* make devRead start at 64bit boundaries */
u32 pad;
struct Vmxnet3_DSDevRead devRead;
u32 ecr;
u32 reserved[5];
};
#define VMXNET3_ECR_RQERR (1 << 0)
#define VMXNET3_ECR_TQERR (1 << 1)
#define VMXNET3_ECR_LINK (1 << 2)
#define VMXNET3_ECR_DIC (1 << 3)
#define VMXNET3_ECR_DEBUG (1 << 4)
/* flip the gen bit of a ring */
#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
/* only use this if moving the idx won't affect the gen bit */
#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
do {\
(idx)++;\
if (unlikely((idx) == (ring_size))) {\
(idx) = 0;\
} \
} while (0)
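/*
 * Illustrative sketch, not part of the patch: a ring index that does own
 * the generation bit would typically advance like this, flipping gen on
 * wrap; VMXNET3_INC_RING_IDX_ONLY above is the shortcut for cases where
 * moving the index cannot affect gen.  The helper name is hypothetical.
 */
static inline void vmxnet3_ring_adv_example(u32 *idx, u8 *gen, u32 size)
{
	if (++(*idx) == size) {
		*idx = 0;
		VMXNET3_FLIP_RING_GEN(*gen);	/* toggle descriptor ownership */
	}
}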
#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
(vfTable[vid >> 5] |= (1 << (vid & 31)))
#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
(vfTable[vid >> 5] &= ~(1 << (vid & 31)))
#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
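/*
 * Worked example (reader's note): VMXNET3_VFT_SIZE is 4096 / 32 == 128
 * u32 words, one bit per VLAN ID.  For vid 100, vid >> 5 == 3 and
 * vid & 31 == 4, so VMXNET3_SET_VFTABLE_ENTRY(vfTable, 100) sets bit 4 of
 * vfTable[3].
 */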
#define VMXNET3_MAX_MTU 9000
#define VMXNET3_MIN_MTU 60
#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
#define VMXNET3_LINK_DOWN 0
#endif /* _VMXNET3_DEFS_H_ */
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
/*
* PCI Device ID Table
* Last entry must be all 0s
*/
static const struct pci_device_id vmxnet3_pciid_table[] = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static atomic_t devices_found;
/*
* Enable/Disable the given intr
*/
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}
static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
/*
* Enable/Disable all intrs used by the device
*/
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->intr.num_intrs; i++)
vmxnet3_enable_intr(adapter, i);
}
static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->intr.num_intrs; i++)
vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
return netif_queue_stopped(adapter->netdev);
}
static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
netif_start_queue(adapter->netdev);
}
static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
netif_wake_queue(adapter->netdev);
}
static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = true;
tq->num_stop++;
netif_stop_queue(adapter->netdev);
}
/*
* Check the link state. This may start or stop the tx queue.
*/
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter)
{
u32 ret;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
adapter->netdev->name, adapter->link_speed);
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
vmxnet3_tq_start(&adapter->tx_queue, adapter);
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
vmxnet3_tq_stop(&adapter->tx_queue, adapter);
}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
u32 events = adapter->shared->ecr;
if (!events)
return;
vmxnet3_ack_events(adapter, events);
/* Check if link state has changed */
if (events & VMXNET3_ECR_LINK)
vmxnet3_check_link(adapter);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
if (adapter->tqd_start->status.stopped) {
printk(KERN_ERR "%s: tq error 0x%x\n",
adapter->netdev->name,
adapter->tqd_start->status.error);
}
if (adapter->rqd_start->status.stopped) {
printk(KERN_ERR "%s: rq error 0x%x\n",
adapter->netdev->name,
adapter->rqd_start->status.error);
}
schedule_work(&adapter->work);
}
}
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev)
{
if (tbi->map_type == VMXNET3_MAP_SINGLE)
pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else if (tbi->map_type == VMXNET3_MAP_PAGE)
pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else
BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
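/*
* Reclaim the tx ring entries of the packet whose EOP descriptor is at
* @eop_idx: unmap each buffer, advance next2comp past the EOP entry,
* free the skb, and return the number of entries reclaimed.
*/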
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
struct sk_buff *skb;
int entries = 0;
/* no out of order completion */
BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
skb = tq->buf_info[eop_idx].skb;
BUG_ON(skb == NULL);
tq->buf_info[eop_idx].skb = NULL;
VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
while (tq->tx_ring.next2comp != eop_idx) {
vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
pdev);
/* update next2comp w/o tx_lock. Since we are marking more,
* instead of less, tx ring entries avail, the worst case is
* that the tx routine incorrectly re-queues a pkt due to
* insufficient tx ring entries.
*/
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
entries++;
}
dev_kfree_skb_any(skb);
return entries;
}
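/*
* Process tx completion descriptors whose gen bit matches the completion
* ring's gen, reclaiming the corresponding tx ring entries. Wake the tx
* queue if it was stopped and enough descriptors have become available.
*/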
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int completed = 0;
union Vmxnet3_GenericDesc *gdesc;
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
while (gdesc->tcd.gen == tq->comp_ring.gen) {
completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
adapter->pdev, adapter);
vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
}
if (completed) {
spin_lock(&tq->tx_lock);
if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
netif_carrier_ok(adapter->netdev))) {
vmxnet3_tq_wake(tq, adapter);
}
spin_unlock(&tq->tx_lock);
}
return completed;
}
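/*
* Unmap and free all tx buffers still outstanding on the ring and reset
* the tx and completion rings to their initial indices and gen bits.
*/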
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int i;
while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
struct vmxnet3_tx_buf_info *tbi;
union Vmxnet3_GenericDesc *gdesc;
tbi = tq->buf_info + tq->tx_ring.next2comp;
gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
if (tbi->skb) {
dev_kfree_skb_any(tbi->skb);
tbi->skb = NULL;
}
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
}
/* sanity check, verify all buffers are indeed unmapped and freed */
for (i = 0; i < tq->tx_ring.size; i++) {
BUG_ON(tq->buf_info[i].skb != NULL ||
tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
}
tq->tx_ring.gen = VMXNET3_INIT_GEN;
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->comp_ring.gen = VMXNET3_INIT_GEN;
tq->comp_ring.next2proc = 0;
}
void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
if (tq->tx_ring.base) {
pci_free_consistent(adapter->pdev, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc),
tq->tx_ring.base, tq->tx_ring.basePA);
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
pci_free_consistent(adapter->pdev, tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc),
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
if (tq->comp_ring.base) {
pci_free_consistent(adapter->pdev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL;
}
kfree(tq->buf_info);
tq->buf_info = NULL;
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int i;
/* reset the tx ring contents to 0 and reset the tx ring states */
memset(tq->tx_ring.base, 0, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc));
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->tx_ring.gen = VMXNET3_INIT_GEN;
memset(tq->data_ring.base, 0, tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc));
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc));
tq->comp_ring.next2proc = 0;
tq->comp_ring.gen = VMXNET3_INIT_GEN;
/* reset the bookkeeping data */
memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
for (i = 0; i < tq->tx_ring.size; i++)
tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
/* stats are not reset */
}
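/*
* Allocate DMA-coherent memory for the tx, data and completion rings plus
* the buf_info bookkeeping array. On any failure, free whatever was
* allocated and return -ENOMEM.
*/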
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info);
tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
* sizeof(struct Vmxnet3_TxDesc),
&tq->tx_ring.basePA);
if (!tq->tx_ring.base) {
printk(KERN_ERR "%s: failed to allocate tx ring\n",
adapter->netdev->name);
goto err;
}
tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc),
&tq->data_ring.basePA);
if (!tq->data_ring.base) {
printk(KERN_ERR "%s: failed to allocate data ring\n",
adapter->netdev->name);
goto err;
}
tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA);
if (!tq->comp_ring.base) {
printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
adapter->netdev->name);
goto err;
}
tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
GFP_KERNEL);
if (!tq->buf_info) {
printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
adapter->netdev->name);
goto err;
}
return 0;
err:
vmxnet3_tq_destroy(tq, adapter);
return -ENOMEM;
}
/*
* starting from ring->next2fill, allocate rx buffers for the given ring
* of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
* are allocated or allocation fails.
*/
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
int num_to_alloc, struct vmxnet3_adapter *adapter)
{
int num_allocated = 0;
struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;
while (num_allocated < num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;
rbi = rbi_base + ring->next2fill;
gd = ring->base + ring->next2fill;
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
rbi->skb = dev_alloc_skb(rbi->len +
NET_IP_ALIGN);
if (unlikely(rbi->skb == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->skb->dev = adapter->netdev;
skb_reserve(rbi->skb, NET_IP_ALIGN);
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
} else {
/* rx buffer skipped by the device */
}
val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
} else {
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
rbi->len != PAGE_SIZE);
if (rbi->page == NULL) {
rbi->page = alloc_page(GFP_ATOMIC);
if (unlikely(rbi->page == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->dma_addr = pci_map_page(adapter->pdev,
rbi->page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
} else {
/* rx buffers skipped by the device */
}
val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
}
BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = rbi->dma_addr;
gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
rbi->len;
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
rq->uncommitted[ring_idx] += num_allocated;
dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
"%u, uncommited %u\n", num_allocated, ring->next2fill,
ring->next2comp, rq->uncommitted[ring_idx]);
/* so that the device can distinguish a full ring and an empty ring */
BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
return num_allocated;
}
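/* append the rx buffer's page to the skb as a new fragment of rcd->len bytes */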
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_buf_info *rbi)
{
struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
skb_shinfo(skb)->nr_frags;
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
frag->page = rbi->page;
frag->page_offset = 0;
frag->size = rcd->len;
skb->data_len += frag->size;
skb_shinfo(skb)->nr_frags++;
}
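/*
* Fill tx descriptors for @skb. If headers were copied, the SOP descriptor
* points into the data ring; the remaining linear data is split into
* descriptors of at most VMXNET3_MAX_TX_BUF_SIZE bytes, and each page
* fragment gets its own descriptor. The SOP desc keeps the inverted gen
* bit so that the caller can flip it last to hand the pkt to the device.
*/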
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
struct vmxnet3_adapter *adapter)
{
u32 dw2, len;
unsigned long buf_offset;
int i;
union Vmxnet3_GenericDesc *gdesc;
struct vmxnet3_tx_buf_info *tbi = NULL;
BUG_ON(ctx->copy_size > skb_headlen(skb));
/* use the previous gen bit for the SOP desc */
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
gdesc = ctx->sop_txd; /* both loops below can be skipped */
/* no need to map the buffer if headers are copied */
if (ctx->copy_size) {
ctx->sop_txd->txd.addr = tq->data_ring.basePA +
tq->tx_ring.next2fill *
sizeof(struct Vmxnet3_TxDataDesc);
ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
ctx->sop_txd->dword[3] = 0;
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;
dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
/* use the right gen for non-SOP desc */
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
}
/* linear part can use multiple tx desc if it's big */
len = skb_headlen(skb) - ctx->copy_size;
buf_offset = ctx->copy_size;
while (len) {
u32 buf_size;
buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
VMXNET3_MAX_TX_BUF_SIZE : len;
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE;
tbi->dma_addr = pci_map_single(adapter->pdev,
skb->data + buf_offset, buf_size,
PCI_DMA_TODEVICE);
tbi->len = buf_size; /* this automatically converts 2^14 to 0 */
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = tbi->dma_addr;
gdesc->dword[2] = dw2 | buf_size;
gdesc->dword[3] = 0;
dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, gdesc->txd.addr,
gdesc->dword[2], gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
len -= buf_size;
buf_offset += buf_size;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_PAGE;
tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
frag->page_offset, frag->size,
PCI_DMA_TODEVICE);
tbi->len = frag->size;
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = tbi->dma_addr;
gdesc->dword[2] = dw2 | frag->size;
gdesc->dword[3] = 0;
dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
tq->tx_ring.next2fill, gdesc->txd.addr,
gdesc->dword[2], gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
}
ctx->eop_txd = gdesc;
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
/*
* parse and copy relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
* For a pkt requesting csum offloading, they are L2/3 and may include L4
* if it's a TCP/UDP pkt
*
* Returns:
* -1: error happens during parsing
* 0: protocol headers parsed, but too big to be copied
* 1: protocol headers parsed and copied
*
* Other effects:
* 1. related *ctx fields are updated.
* 2. ctx->copy_size is # of bytes copied
* 3. the portion copied is guaranteed to be in the linear part
*
*/
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_TxDataDesc *tdd;
if (ctx->mss) {
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
} else {
unsigned int pull_size;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)
skb_network_header(skb);
if (iph->protocol == IPPROTO_TCP) {
pull_size = ctx->eth_ip_hdr_size +
sizeof(struct tcphdr);
if (unlikely(!pskb_may_pull(skb,
pull_size))) {
goto err;
}
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
} else if (iph->protocol == IPPROTO_UDP) {
ctx->l4_hdr_size =
sizeof(struct udphdr);
} else {
ctx->l4_hdr_size = 0;
}
} else {
/* for simplicity, don't copy L4 headers */
ctx->l4_hdr_size = 0;
}
ctx->copy_size = ctx->eth_ip_hdr_size +
ctx->l4_hdr_size;
} else {
ctx->eth_ip_hdr_size = 0;
ctx->l4_hdr_size = 0;
/* copy as much as allowed */
ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
skb_headlen(skb));
}
/* make sure headers are accessible directly */
if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
goto err;
}
if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
tq->stats.oversized_hdr++;
ctx->copy_size = 0;
return 0;
}
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
return 1;
err:
return -1;
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
struct vmxnet3_tx_ctx *ctx)
{
struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
if (ctx->ipv4) {
struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else {
struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
IPPROTO_TCP, 0);
}
}
/*
* Transmits a pkt thru a given tq
* Returns:
* NETDEV_TX_OK: descriptors are setup successfully
* NETDEV_TX_OK: an error occurred, the pkt is dropped
* NETDEV_TX_BUSY: tx ring is full, queue is stopped
*
* Side-effects:
* 1. tx ring may be changed
* 2. tq stats may be updated accordingly
* 3. shared->txNumDeferred may be updated
*/
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
int ret;
u32 count;
unsigned long flags;
struct vmxnet3_tx_ctx ctx;
union Vmxnet3_GenericDesc *gdesc;
/* conservatively estimate # of descriptors to use */
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
if (skb_header_cloned(skb)) {
if (unlikely(pskb_expand_head(skb, 0, 0,
GFP_ATOMIC) != 0)) {
tq->stats.drop_tso++;
goto drop_pkt;
}
tq->stats.copy_skb_header++;
}
vmxnet3_prepare_tso(skb, &ctx);
} else {
if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
/* non-tso pkts must not use more than
* VMXNET3_MAX_TXD_PER_PKT entries
*/
if (skb_linearize(skb) != 0) {
tq->stats.drop_too_many_frags++;
goto drop_pkt;
}
tq->stats.linearized++;
/* recalculate the # of descriptors to use */
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
}
}
ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
/* hdrs parsed, check against other limits */
if (ctx.mss) {
if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
VMXNET3_MAX_TX_BUF_SIZE)) {
goto hdr_too_big;
}
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (unlikely(ctx.eth_ip_hdr_size +
skb->csum_offset >
VMXNET3_MAX_CSUM_OFFSET)) {
goto hdr_too_big;
}
}
}
} else {
tq->stats.drop_hdr_inspect_err++;
goto drop_pkt;
}
spin_lock_irqsave(&tq->tx_lock, flags);
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
vmxnet3_tq_stop(tq, adapter);
spin_unlock_irqrestore(&tq->tx_lock, flags);
return NETDEV_TX_BUSY;
}
/* fill tx descs related to addr & len */
vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
/* setup the EOP desc */
ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
/* setup the SOP desc */
gdesc = ctx.sop_txd;
if (ctx.mss) {
gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
gdesc->txd.om = VMXNET3_OM_TSO;
gdesc->txd.msscof = ctx.mss;
tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
ctx.mss - 1) / ctx.mss;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gdesc->txd.hlen = ctx.eth_ip_hdr_size;
gdesc->txd.om = VMXNET3_OM_CSUM;
gdesc->txd.msscof = ctx.eth_ip_hdr_size +
skb->csum_offset;
} else {
gdesc->txd.om = 0;
gdesc->txd.msscof = 0;
}
tq->shared->txNumDeferred++;
}
if (vlan_tx_tag_present(skb)) {
gdesc->txd.ti = 1;
gdesc->txd.tci = vlan_tx_tag_get(skb);
}
wmb();
/* finally flips the GEN bit of the SOP desc */
gdesc->dword[2] ^= VMXNET3_TXD_GEN;
dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
gdesc->dword[3]);
spin_unlock_irqrestore(&tq->tx_lock, flags);
if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
tq->tx_ring.next2fill);
}
netdev->trans_start = jiffies;
return NETDEV_TX_OK;
hdr_too_big:
tq->stats.drop_oversized_hdr++;
drop_pkt:
tq->stats.drop_total++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
}
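/*
* Set skb->ip_summed from the rx completion descriptor: CHECKSUM_UNNECESSARY
* when the device verified both the IP and TCP/UDP checksums, otherwise fall
* back to the reported checksum value or CHECKSUM_NONE.
*/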
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
struct sk_buff *skb,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->rxcsum) {
/* typical case: TCP/UDP over IP and both csums are correct */
if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
VMXNET3_RCD_CSUM_OK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
BUG_ON(gdesc->rcd.frg);
} else {
if (gdesc->rcd.csum) {
skb->csum = htons(gdesc->rcd.csum);
skb->ip_summed = CHECKSUM_PARTIAL;
} else {
skb->ip_summed = CHECKSUM_NONE;
}
}
} else {
skb->ip_summed = CHECKSUM_NONE;
}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
rq->stats.drop_err++;
if (!rcd->fcs)
rq->stats.drop_fcs++;
rq->stats.drop_total++;
/*
* We do not unmap and chain the rx buffer to the skb.
* We basically pretend this buffer is not used and will be recycled
* by vmxnet3_rq_alloc_rx_buf()
*/
/*
* ctx->skb may be NULL if this is the first and the only one
* desc for the pkt
*/
if (ctx->skb)
dev_kfree_skb_irq(ctx->skb);
ctx->skb = NULL;
}
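/*
* Process up to @quota rx completion descriptors. The SOP buffer provides
* the skb head, body buffers are appended as page fragments, and completed
* pkts are handed to the stack. Rx rings are replenished along the way and
* the RXPROD register is updated when the device requests it. Returns the
* number of completion descriptors processed.
*/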
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
u32 num_rxd = 0;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
struct sk_buff *skb;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
if (num_rxd >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
*/
break;
}
num_rxd++;
idx = rcd->rxdIdx;
ring_idx = rcd->rqID == rq->qid ? 0 : 1;
rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
rbi = rq->buf_info[ring_idx] + idx;
BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
if (unlikely(rcd->eop && rcd->err)) {
vmxnet3_rx_error(rq, rcd, ctx, adapter);
goto rcd_done;
}
if (rcd->sop) { /* first buf of the pkt */
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
rcd->rqID != rq->qid);
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
}
ctx->skb = rbi->skb;
rbi->skb = NULL;
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
skb_put(ctx->skb, rcd->len);
} else {
BUG_ON(ctx->skb == NULL);
/* a non-SOP buffer must be type 1 in most cases */
if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
if (rcd->len) {
pci_unmap_page(adapter->pdev,
rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
vmxnet3_append_frag(ctx->skb, rcd, rbi);
rbi->page = NULL;
}
} else {
/*
* The only time a non-SOP buffer is type 0 is
* when it's EOP and error flag is raised, which
* has already been handled.
*/
BUG_ON(true);
}
}
skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
skb->truesize += skb->data_len;
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
if (unlikely(adapter->vlan_grp && rcd->ts)) {
vlan_hwaccel_receive_skb(skb,
adapter->vlan_grp, rcd->tci);
} else {
netif_receive_skb(skb);
}
adapter->netdev->last_rx = jiffies;
ctx->skb = NULL;
}
rcd_done:
/* device may skip some rx descs */
rq->rx_ring[ring_idx].next2comp = idx;
VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
rq->rx_ring[ring_idx].size);
/* refill rx buffers frequently to avoid starving the h/w */
num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
ring_idx);
if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
ring_idx, adapter))) {
vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
adapter);
/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
rq->rx_ring[ring_idx].next2fill);
rq->uncommitted[ring_idx] = 0;
}
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
}
return num_rxd;
}
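/*
* Unmap and free all rx buffers (skbs and pages) on both rx rings and reset
* the ring indices and gen bits.
*/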
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
rxd = &rq->rx_ring[ring_idx].base[i].rxd;
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rq->buf_info[ring_idx][i].skb) {
pci_unmap_single(adapter->pdev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
rq->buf_info[ring_idx][i].skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
rq->buf_info[ring_idx][i].page) {
pci_unmap_page(adapter->pdev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
put_page(rq->buf_info[ring_idx][i].page);
rq->buf_info[ring_idx][i].page = NULL;
}
}
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
rq->rx_ring[ring_idx].next2fill =
rq->rx_ring[ring_idx].next2comp = 0;
rq->uncommitted[ring_idx] = 0;
}
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
int i;
int j;
/* all rx buffers must have already been freed */
for (i = 0; i < 2; i++) {
if (rq->buf_info[i]) {
for (j = 0; j < rq->rx_ring[i].size; j++)
BUG_ON(rq->buf_info[i][j].page != NULL);
}
}
kfree(rq->buf_info[0]);
for (i = 0; i < 2; i++) {
if (rq->rx_ring[i].base) {
pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
* sizeof(struct Vmxnet3_RxDesc),
rq->rx_ring[i].base,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
rq->buf_info[i] = NULL;
}
if (rq->comp_ring.base) {
pci_free_consistent(adapter->pdev, rq->comp_ring.size *
sizeof(struct Vmxnet3_RxCompDesc),
rq->comp_ring.base, rq->comp_ring.basePA);
rq->comp_ring.base = NULL;
}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
int i;
/* initialize buf_info */
for (i = 0; i < rq->rx_ring[0].size; i++) {
/* 1st buf for a pkt is skbuff */
if (i % adapter->rx_buf_per_pkt == 0) {
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
rq->buf_info[0][i].len = adapter->skb_buf_size;
} else { /* subsequent bufs for a pkt are frags */
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq->buf_info[0][i].len = PAGE_SIZE;
}
}
for (i = 0; i < rq->rx_ring[1].size; i++) {
rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq->buf_info[1][i].len = PAGE_SIZE;
}
/* reset internal state and allocate buffers for both rings */
for (i = 0; i < 2; i++) {
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
rq->uncommitted[i] = 0;
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
}
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
adapter) == 0) {
/* the 1st ring must have at least 1 rx buffer */
return -ENOMEM;
}
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
/* reset the comp ring */
rq->comp_ring.next2proc = 0;
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
sizeof(struct Vmxnet3_RxCompDesc));
rq->comp_ring.gen = VMXNET3_INIT_GEN;
/* reset rxctx */
rq->rx_ctx.skb = NULL;
/* stats are not reset */
return 0;
}
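/*
* Allocate DMA-coherent memory for both rx rings and the completion ring,
* plus a single buf_info array shared by the two rings. On failure, free
* whatever was allocated.
*/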
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
int i;
size_t sz;
struct vmxnet3_rx_buf_info *bi;
for (i = 0; i < 2; i++) {
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
&rq->rx_ring[i].basePA);
if (!rq->rx_ring[i].base) {
printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
adapter->netdev->name, i);
goto err;
}
}
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
&rq->comp_ring.basePA);
if (!rq->comp_ring.base) {
printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
adapter->netdev->name);
goto err;
}
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
bi = kmalloc(sz, GFP_KERNEL);
if (!bi) {
printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
adapter->netdev->name);
goto err;
}
memset(bi, 0, sz);
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
return 0;
err:
vmxnet3_rq_destroy(rq, adapter);
return -ENOMEM;
}
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
if (unlikely(adapter->shared->ecr))
vmxnet3_process_events(adapter);
vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
struct vmxnet3_adapter *adapter = container_of(napi,
struct vmxnet3_adapter, napi);
int rxd_done;
rxd_done = vmxnet3_do_poll(adapter, budget);
if (rxd_done < budget) {
napi_complete(napi);
vmxnet3_enable_intr(adapter, 0);
}
return rxd_done;
}
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
if (unlikely(icr == 0))
/* not ours */
return IRQ_NONE;
}
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, 0);
napi_schedule(&adapter->napi);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int irq;
if (adapter->intr.type == VMXNET3_IT_MSIX)
irq = adapter->intr.msix_entries[0].vector;
else
irq = adapter->pdev->irq;
disable_irq(irq);
vmxnet3_intr(irq, netdev);
enable_irq(irq);
}
#endif
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
int err;
if (adapter->intr.type == VMXNET3_IT_MSIX) {
/* we only use 1 MSI-X vector */
err = request_irq(adapter->intr.msix_entries[0].vector,
vmxnet3_intr, 0, adapter->netdev->name,
adapter->netdev);
} else if (adapter->intr.type == VMXNET3_IT_MSI) {
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
} else {
err = request_irq(adapter->pdev->irq, vmxnet3_intr,
IRQF_SHARED, adapter->netdev->name,
adapter->netdev);
}
if (err)
printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
":%d\n", adapter->netdev->name, adapter->intr.type, err);
if (!err) {
int i;
/* init our intr settings */
for (i = 0; i < adapter->intr.num_intrs; i++)
adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
/* next setup intr index for all intr sources */
adapter->tx_queue.comp_ring.intr_idx = 0;
adapter->rx_queue.comp_ring.intr_idx = 0;
adapter->intr.event_intr_idx = 0;
printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
"allocated\n", adapter->netdev->name, adapter->intr.type,
adapter->intr.mask_mode, adapter->intr.num_intrs);
}
return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
adapter->intr.num_intrs <= 0);
switch (adapter->intr.type) {
case VMXNET3_IT_MSIX:
{
int i;
for (i = 0; i < adapter->intr.num_intrs; i++)
free_irq(adapter->intr.msix_entries[i].vector,
adapter->netdev);
break;
}
case VMXNET3_IT_MSI:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
case VMXNET3_IT_INTX:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
default:
BUG_ON(true);
}
}
static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_DriverShared *shared = adapter->shared;
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
if (grp) {
/* add vlan rx stripping. */
if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
int i;
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = grp;
/* update FEATURES to device */
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
/*
* Clear entire vfTable; then enable untagged pkts.
* Note: setting one entry in vfTable to non-zero turns
* on VLAN rx filtering.
*/
for (i = 0; i < VMXNET3_VFT_SIZE; i++)
vfTable[i] = 0;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
} else {
printk(KERN_ERR "%s: vlan_rx_register when device has "
"no NETIF_F_HW_VLAN_RX\n", netdev->name);
}
} else {
/* remove vlan rx stripping. */
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = NULL;
if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
int i;
for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
/* clear entire vfTable; this also disables
* VLAN rx filtering
*/
vfTable[i] = 0;
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* update FEATURES to device */
devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
if (adapter->vlan_grp) {
u16 vid;
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
bool activeVlan = false;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if (vlan_group_get_device(adapter->vlan_grp, vid)) {
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
activeVlan = true;
}
}
if (activeVlan) {
/* continue to allow untagged pkts */
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
}
}
}
static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
u32 sz = netdev->mc_count * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
int i;
struct dev_mc_list *mc = netdev->mc_list;
for (i = 0; i < netdev->mc_count; i++) {
BUG_ON(!mc);
memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
ETH_ALEN);
mc = mc->next;
}
}
}
return buf;
}
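/*
* Build the new rx mode from the netdev flags, copy the multicast list into
* a flat table when needed, and push the changes to the device with the
* UPDATE_RX_MODE and UPDATE_MAC_FILTERS commands.
*/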
static void
vmxnet3_set_mc(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC)
new_mode |= VMXNET3_RXM_PROMISC;
if (netdev->flags & IFF_BROADCAST)
new_mode |= VMXNET3_RXM_BCAST;
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
if (netdev->mc_count > 0) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = netdev->mc_count *
ETH_ALEN;
rxConf->mfTablePA = virt_to_phys(new_table);
} else {
printk(KERN_INFO "%s: failed to copy mcast list"
", setting ALL_MULTI\n", netdev->name);
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
if (!(new_mode & VMXNET3_RXM_MCAST)) {
rxConf->mfTableLen = 0;
rxConf->mfTablePA = 0;
}
if (new_mode != rxConf->rxMode) {
rxConf->rxMode = new_mode;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RX_MODE);
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_MAC_FILTERS);
kfree(new_table);
}
/*
* Set up driver_shared based on settings in adapter.
*/
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
int i;
memset(shared, 0, sizeof(*shared));
/* driver settings */
shared->magic = VMXNET3_REV1_MAGIC;
devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
devRead->misc.driverInfo.vmxnet3RevSpt = 1;
devRead->misc.driverInfo.uptVerSpt = 1;
devRead->misc.ddPA = virt_to_phys(adapter);
devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
/* set up feature flags */
if (adapter->rxcsum)
devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->lro) {
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
}
if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
&& adapter->vlan_grp) {
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
}
devRead->misc.mtu = adapter->netdev->mtu;
devRead->misc.queueDescPA = adapter->queue_desc_pa;
devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
sizeof(struct Vmxnet3_RxQueueDesc);
/* tx queue settings */
BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
devRead->misc.numTxQueues = 1;
tqc = &adapter->tqd_start->conf;
tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
tqc->txRingSize = adapter->tx_queue.tx_ring.size;
tqc->dataRingSize = adapter->tx_queue.data_ring.size;
tqc->compRingSize = adapter->tx_queue.comp_ring.size;
tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
tqc->txRingSize;
tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
/* rx queue settings */
devRead->misc.numRxQueues = 1;
rqc = &adapter->rqd_start->conf;
rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
rqc->compRingSize = adapter->rx_queue.comp_ring.size;
rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
(rqc->rxRingSize[0] + rqc->rxRingSize[1]);
rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
/* intr settings */
devRead->intrConf.autoMask = adapter->intr.mask_mode ==
VMXNET3_IMM_AUTO;
devRead->intrConf.numIntrs = adapter->intr.num_intrs;
for (i = 0; i < adapter->intr.num_intrs; i++)
devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
vmxnet3_restore_vlan(adapter);
/* the rest are already zeroed */
}
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
int err;
u32 ret;
dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
adapter->rx_queue.rx_ring[0].size,
adapter->rx_queue.rx_ring[1].size);
vmxnet3_tq_init(&adapter->tx_queue, adapter);
err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
if (err) {
printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
adapter->netdev->name, err);
goto rq_err;
}
err = vmxnet3_request_irqs(adapter);
if (err) {
printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
adapter->netdev->name, err);
goto irq_err;
}
vmxnet3_setup_driver_shared(adapter);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
VMXNET3_GET_ADDR_LO(adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
VMXNET3_GET_ADDR_HI(adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
if (ret != 0) {
printk(KERN_ERR "Failed to activate dev %s: error %u\n",
adapter->netdev->name, ret);
err = -EINVAL;
goto activate_err;
}
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
adapter->rx_queue.rx_ring[0].next2fill);
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
adapter->rx_queue.rx_ring[1].next2fill);
/* Apply the rx filter settings last. */
vmxnet3_set_mc(adapter->netdev);
/*
* Check link state when first activating device. It will start the
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter);
napi_enable(&adapter->napi);
vmxnet3_enable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
return 0;
activate_err:
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
/* free up buffers we allocated */
vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
}
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
vmxnet3_disable_all_intrs(adapter);
napi_disable(&adapter->napi);
netif_tx_disable(adapter->netdev);
adapter->link_speed = 0;
netif_carrier_off(adapter->netdev);
vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
vmxnet3_free_irqs(adapter);
return 0;
}
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
u32 tmp;
tmp = *(u32 *)mac;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
tmp = (mac[5] << 8) | mac[4];
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
}
/* ==================== initialization and cleanup routines ============ */
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
int err;
unsigned long mmio_start, mmio_len;
struct pci_dev *pdev = adapter->pdev;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
pci_name(pdev), err);
return err;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
printk(KERN_ERR "pci_set_consistent_dma_mask failed "
"for adapter %s\n", pci_name(pdev));
err = -EIO;
goto err_set_mask;
}
*dma64 = true;
} else {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_ERR "pci_set_dma_mask failed for adapter "
"%s\n", pci_name(pdev));
err = -EIO;
goto err_set_mask;
}
*dma64 = false;
}
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
printk(KERN_ERR "Failed to request region for adapter %s: "
"error %d\n", pci_name(pdev), err);
goto err_set_mask;
}
pci_set_master(pdev);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr0) {
printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
pci_name(pdev));
err = -EIO;
goto err_ioremap;
}
mmio_start = pci_resource_start(pdev, 1);
mmio_len = pci_resource_len(pdev, 1);
adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr1) {
printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
pci_name(pdev));
err = -EIO;
goto err_bar1;
}
return 0;
err_bar1:
iounmap(adapter->hw_addr0);
err_ioremap:
pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
pci_disable_device(pdev);
return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
BUG_ON(!adapter->pdev);
iounmap(adapter->hw_addr0);
iounmap(adapter->hw_addr1);
pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
pci_disable_device(adapter->pdev);
}
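/*
* Pick skb_buf_size and rx_buf_per_pkt for the current MTU, then round the
* ring0 size up to a multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN,
* capped at VMXNET3_RX_RING_MAX_SIZE.
*/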
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
size_t sz;
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
VMXNET3_MAX_ETH_HDR_SIZE) {
adapter->skb_buf_size = adapter->netdev->mtu +
VMXNET3_MAX_ETH_HDR_SIZE;
if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
adapter->rx_buf_per_pkt = 1;
} else {
adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
VMXNET3_MAX_ETH_HDR_SIZE;
adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
}
/*
* for simplicity, force the ring0 size to be a multiple of
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
sz - 1) / sz * sz;
adapter->rx_queue.rx_ring[0].size = min_t(u32,
adapter->rx_queue.rx_ring[0].size,
VMXNET3_RX_RING_MAX_SIZE / sz * sz);
}
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
u32 rx_ring_size, u32 rx_ring2_size)
{
int err;
adapter->tx_queue.tx_ring.size = tx_ring_size;
adapter->tx_queue.data_ring.size = tx_ring_size;
adapter->tx_queue.comp_ring.size = tx_ring_size;
adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
adapter->tx_queue.stopped = true;
err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
if (err)
return err;
adapter->rx_queue.rx_ring[0].size = rx_ring_size;
adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
adapter->rx_queue.rx_ring[1].size;
adapter->rx_queue.qid = 0;
adapter->rx_queue.qid2 = 1;
adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
if (err)
vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
return err;
}
static int
vmxnet3_open(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
int err;
adapter = netdev_priv(netdev);
spin_lock_init(&adapter->tx_queue.tx_lock);
err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE);
if (err)
goto queue_err;
err = vmxnet3_activate_dev(adapter);
if (err)
goto activate_err;
return 0;
activate_err:
vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
queue_err:
return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
/*
* Reset_work may be in the middle of resetting the device; wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
msleep(1);
vmxnet3_quiesce_dev(adapter);
vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
/*
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
* vmxnet3_close() will deadlock.
*/
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
/* we need to enable NAPI, otherwise dev_close will deadlock */
napi_enable(&adapter->napi);
dev_close(adapter->netdev);
}
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
return -EINVAL;
if (new_mtu > 1500 && !adapter->jumbo_frame)
return -EINVAL;
netdev->mtu = new_mtu;
/*
* Reset_work may be in the middle of resetting the device; wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
msleep(1);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
vmxnet3_adjust_rx_ring_size(adapter);
adapter->rx_queue.comp_ring.size =
adapter->rx_queue.rx_ring[0].size +
adapter->rx_queue.rx_ring[1].size;
err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
if (err) {
printk(KERN_ERR "%s: failed to re-create rx queue,"
" error %d. Closing it.\n", netdev->name, err);
goto out;
}
err = vmxnet3_activate_dev(adapter);
if (err) {
printk(KERN_ERR "%s: failed to re-activate, error %d. "
"Closing it\n", netdev->name, err);
goto out;
}
}
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
if (err)
vmxnet3_force_close(adapter);
return err;
}
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
struct net_device *netdev = adapter->netdev;
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_LRO;
printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
adapter->rxcsum = true;
adapter->jumbo_frame = true;
adapter->lro = true;
if (dma64) {
netdev->features |= NETIF_F_HIGHDMA;
printk(" highDMA");
}
netdev->vlan_features = netdev->features;
printk("\n");
}
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
u32 tmp;
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
*(u32 *)mac = tmp;
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
mac[4] = tmp & 0xff;
mac[5] = (tmp >> 8) & 0xff;
}
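/*
* Query the device for its preferred interrupt type and mask mode. When the
* type is AUTO, try MSI-X first, then MSI; if neither can be enabled (or the
* type is not AUTO), fall back to INTx with a single vector.
*/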
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
u32 cfg;
/* intr settings */
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
if (adapter->intr.type == VMXNET3_IT_AUTO) {
int err;
adapter->intr.msix_entries[0].entry = 0;
err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
VMXNET3_LINUX_MAX_MSIX_VECT);
if (!err) {
adapter->intr.num_intrs = 1;
adapter->intr.type = VMXNET3_IT_MSIX;
return;
}
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->intr.num_intrs = 1;
adapter->intr.type = VMXNET3_IT_MSI;
return;
}
}
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
adapter->intr.num_intrs = 1;
}
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
if (adapter->intr.type == VMXNET3_IT_MSIX)
pci_disable_msix(adapter->pdev);
else if (adapter->intr.type == VMXNET3_IT_MSI)
pci_disable_msi(adapter->pdev);
else
BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
schedule_work(&adapter->work);
}
static void
vmxnet3_reset_work(struct work_struct *data)
{
struct vmxnet3_adapter *adapter;
adapter = container_of(data, struct vmxnet3_adapter, work);
/* if another thread is resetting the device, no need to proceed */
if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
return;
/* if the device is closed, we must leave it alone */
if (netif_running(adapter->netdev)) {
printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
vmxnet3_activate_dev(adapter);
} else {
printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
}
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct net_device_ops vmxnet3_netdev_ops = {
.ndo_open = vmxnet3_open,
.ndo_stop = vmxnet3_close,
.ndo_start_xmit = vmxnet3_xmit_frame,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
.ndo_get_stats = vmxnet3_get_stats,
.ndo_tx_timeout = vmxnet3_tx_timeout,
.ndo_set_multicast_list = vmxnet3_set_mc,
.ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vmxnet3_netpoll,
#endif
};
int err;
bool dma64 = false; /* stupid gcc */
u32 ver;
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
if (!netdev) {
printk(KERN_ERR "Failed to alloc ethernet device for adapter "
"%s\n", pci_name(pdev));
return -ENOMEM;
}
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->shared = pci_alloc_consistent(adapter->pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
if (!adapter->shared) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
pci_name(pdev));
err = -ENOMEM;
goto err_alloc_shared;
}
adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
sizeof(struct Vmxnet3_TxQueueDesc) +
sizeof(struct Vmxnet3_RxQueueDesc),
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
pci_name(pdev));
err = -ENOMEM;
goto err_alloc_queue_desc;
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
+ 1);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
pci_name(pdev));
err = -ENOMEM;
goto err_alloc_pm;
}
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
if (err < 0)
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
} else {
printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
" %s\n", ver, pci_name(pdev));
err = -EBUSY;
goto err_ver;
}
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
} else {
printk(KERN_ERR "Incompatible upt version (0x%x) for "
"adapter %s\n", ver, pci_name(pdev));
err = -EBUSY;
goto err_ver;
}
vmxnet3_declare_features(adapter, dma64);
adapter->dev_number = atomic_read(&devices_found);
vmxnet3_alloc_intr_resources(adapter);
vmxnet3_read_mac_addr(adapter, mac);
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev->netdev_ops = &vmxnet3_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
vmxnet3_set_ethtool_ops(netdev);
INIT_WORK(&adapter->work, vmxnet3_reset_work);
netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
if (err) {
printk(KERN_ERR "Failed to register adapter %s\n",
pci_name(pdev));
goto err_register;
}
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
atomic_inc(&devices_found);
return 0;
err_register:
vmxnet3_free_intr_resources(adapter);
err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
kfree(adapter->pm_conf);
err_alloc_pm:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
sizeof(struct Vmxnet3_RxQueueDesc),
adapter->tqd_start, adapter->queue_desc_pa);
err_alloc_queue_desc:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
err_alloc_shared:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
flush_scheduled_work();
unregister_netdev(netdev);
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
kfree(adapter->pm_conf);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
sizeof(struct Vmxnet3_RxQueueDesc),
adapter->tqd_start, adapter->queue_desc_pa);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
free_netdev(netdev);
}
#ifdef CONFIG_PM
static int
vmxnet3_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_PMConf *pmConf;
struct ethhdr *ehdr;
struct arphdr *ahdr;
u8 *arpreq;
struct in_device *in_dev;
struct in_ifaddr *ifa;
int i = 0;
if (!netif_running(netdev))
return 0;
vmxnet3_disable_all_intrs(adapter);
vmxnet3_free_irqs(adapter);
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
netif_stop_queue(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
memset(pmConf, 0, sizeof(*pmConf));
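	/*
	 * Pattern filters: bit N of mask[] enables matching of byte N of
	 * pattern[], so maskSize is (patternSize - 1) / 8 + 1.  The unicast
	 * filter below matches the six bytes of the adapter's MAC address
	 * (mask[0] = 0x3F).
	 */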
if (adapter->wol & WAKE_UCAST) {
pmConf->filters[i].patternSize = ETH_ALEN;
pmConf->filters[i].maskSize = 1;
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
if (adapter->wol & WAKE_ARP) {
in_dev = in_dev_get(netdev);
if (!in_dev)
goto skip_arp;
ifa = (struct in_ifaddr *)in_dev->ifa_list;
if (!ifa)
goto skip_arp;
pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
sizeof(struct arphdr) + /* ARP header */
2 * ETH_ALEN + /* 2 Ethernet addresses*/
2 * sizeof(u32); /*2 IPv4 addresses */
pmConf->filters[i].maskSize =
(pmConf->filters[i].patternSize - 1) / 8 + 1;
/* ETH_P_ARP in Ethernet header. */
ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
ehdr->h_proto = htons(ETH_P_ARP);
/* ARPOP_REQUEST in ARP header. */
ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
ahdr->ar_op = htons(ARPOP_REQUEST);
arpreq = (u8 *)(ahdr + 1);
/* The Unicast IPv4 address in 'tip' field. */
arpreq += 2 * ETH_ALEN + sizeof(u32);
*(u32 *)arpreq = ifa->ifa_address;
/* The mask for the relevant bits. */
pmConf->filters[i].mask[0] = 0x00;
pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
pmConf->filters[i].mask[3] = 0x00;
pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
adapter->shared->devRead.pmConfDesc.confVer = 1;
adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
adapter->wol);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
return 0;
}
static int
vmxnet3_resume(struct device *device)
{
int err;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_PMConf *pmConf;
if (!netif_running(netdev))
return 0;
/* Destroy wake-up filters. */
pmConf = adapter->pm_conf;
memset(pmConf, 0, sizeof(*pmConf));
adapter->shared->devRead.pmConfDesc.confVer = 1;
adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
netif_device_attach(netdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device_mem(pdev);
if (err != 0)
return err;
pci_enable_wake(pdev, PCI_D0, 0);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
vmxnet3_alloc_intr_resources(adapter);
vmxnet3_request_irqs(adapter);
vmxnet3_enable_all_intrs(adapter);
return 0;
}
static struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
};
#endif
static struct pci_driver vmxnet3_driver = {
.name = vmxnet3_driver_name,
.id_table = vmxnet3_pciid_table,
.probe = vmxnet3_probe_device,
.remove = __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
.driver.pm = &vmxnet3_pm_ops,
#endif
};
static int __init
vmxnet3_init_module(void)
{
printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
VMXNET3_DRIVER_VERSION_REPORT);
return pci_register_driver(&vmxnet3_driver);
}
module_init(vmxnet3_init_module);
static void
vmxnet3_exit_module(void)
{
pci_unregister_driver(&vmxnet3_driver);
}
module_exit(vmxnet3_exit_module);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
#include "vmxnet3_int.h"
struct vmxnet3_stat_desc {
char desc[ETH_GSTRING_LEN];
int offset;
};
static u32
vmxnet3_get_rx_csum(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
return adapter->rxcsum;
}
static int
vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (adapter->rxcsum != val) {
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM;
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
}
return 0;
}
/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
/* description, offset */
{ "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
{ "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
{ "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
{ "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
{ "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
{ "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
{ "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
{ "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
{ "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
{ "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};
/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
/* description, offset */
{"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
drop_total) },
{ " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
drop_too_many_frags) },
{ " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
drop_oversized_hdr) },
{ " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
drop_hdr_inspect_err) },
{ " tso", offsetof(struct vmxnet3_tq_driver_stats,
drop_tso) },
{ "ring full", offsetof(struct vmxnet3_tq_driver_stats,
tx_ring_full) },
{ "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
linearized) },
{ "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
copy_skb_header) },
{ "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
oversized_hdr) },
};
/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
{ "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
{ "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
{ "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
{ "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
{ "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
{ "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
{ "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
{ "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
{ "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
{ "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
};
/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
/* description, offset */
{ "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
drop_total) },
{ " err", offsetof(struct vmxnet3_rq_driver_stats,
drop_err) },
{ " fcs", offsetof(struct vmxnet3_rq_driver_stats,
drop_fcs) },
{ "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
rx_buf_alloc_failure) },
};
/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
/* description, offset */
{ "tx timeout count", offsetof(struct vmxnet3_adapter,
tx_timeout_count) }
};
struct net_device_stats *
vmxnet3_get_stats(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
struct vmxnet3_tq_driver_stats *drvTxStats;
struct vmxnet3_rq_driver_stats *drvRxStats;
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
struct net_device_stats *net_stats = &netdev->stats;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
/* Assuming that we have a single queue device */
devTxStats = &adapter->tqd_start->stats;
devRxStats = &adapter->rqd_start->stats;
/* Get access to the driver stats per queue */
drvTxStats = &adapter->tx_queue.stats;
drvRxStats = &adapter->rx_queue.stats;
memset(net_stats, 0, sizeof(*net_stats));
net_stats->rx_packets = devRxStats->ucastPktsRxOK +
devRxStats->mcastPktsRxOK +
devRxStats->bcastPktsRxOK;
net_stats->tx_packets = devTxStats->ucastPktsTxOK +
devTxStats->mcastPktsTxOK +
devTxStats->bcastPktsTxOK;
net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
devRxStats->mcastBytesRxOK +
devRxStats->bcastBytesRxOK;
net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
devTxStats->mcastBytesTxOK +
devTxStats->bcastBytesTxOK;
net_stats->rx_errors = devRxStats->pktsRxError;
net_stats->tx_errors = devTxStats->pktsTxError;
net_stats->rx_dropped = drvRxStats->drop_total;
net_stats->tx_dropped = drvTxStats->drop_total;
net_stats->multicast = devRxStats->mcastPktsRxOK;
return net_stats;
}
static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
ARRAY_SIZE(vmxnet3_tq_driver_stats) +
ARRAY_SIZE(vmxnet3_rq_dev_stats) +
ARRAY_SIZE(vmxnet3_rq_driver_stats) +
ARRAY_SIZE(vmxnet3_global_stats);
default:
return -EOPNOTSUPP;
}
}
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
return 20 * sizeof(u32);
}
static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
ETHTOOL_BUSINFO_LEN);
drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
}
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
if (stringset == ETH_SS_STATS) {
int i;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
ETH_GSTRING_LEN);
buf += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
ETH_GSTRING_LEN);
buf += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
ETH_GSTRING_LEN);
buf += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
ETH_GSTRING_LEN);
buf += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
memcpy(buf, vmxnet3_global_stats[i].desc,
ETH_GSTRING_LEN);
buf += ETH_GSTRING_LEN;
}
}
}
static u32
vmxnet3_get_flags(struct net_device *netdev)
{
return netdev->features;
}
static int
vmxnet3_set_flags(struct net_device *netdev, u32 data)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
if (lro_requested ^ lro_present) {
/* toggle the LRO feature*/
netdev->features ^= NETIF_F_LRO;
		/* update hardware LRO capability accordingly */
		if (lro_requested)
			adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
return 0;
}
static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *buf)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u8 *base;
int i;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
/* this does assume each counter is 64-bit wide */
base = (u8 *)&adapter->tqd_start->stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
base = (u8 *)&adapter->tx_queue.stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
base = (u8 *)&adapter->rqd_start->stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
base = (u8 *)&adapter->rx_queue.stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
base = (u8 *)adapter;
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}
static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
memset(p, 0, vmxnet3_get_regs_len(netdev));
regs->version = 1;
/* Update vmxnet3_get_regs_len if we want to dump more registers */
/* make each ring use multiple of 16 bytes */
buf[0] = adapter->tx_queue.tx_ring.next2fill;
buf[1] = adapter->tx_queue.tx_ring.next2comp;
buf[2] = adapter->tx_queue.tx_ring.gen;
buf[3] = 0;
buf[4] = adapter->tx_queue.comp_ring.next2proc;
buf[5] = adapter->tx_queue.comp_ring.gen;
buf[6] = adapter->tx_queue.stopped;
buf[7] = 0;
buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
buf[10] = adapter->rx_queue.rx_ring[0].gen;
buf[11] = 0;
buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
buf[14] = adapter->rx_queue.rx_ring[1].gen;
buf[15] = 0;
buf[16] = adapter->rx_queue.comp_ring.next2proc;
buf[17] = adapter->rx_queue.comp_ring.gen;
buf[18] = 0;
buf[19] = 0;
}
static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
wol->wolopts = adapter->wol;
}
static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
WAKE_MAGICSECURE)) {
return -EOPNOTSUPP;
}
adapter->wol = wol->wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
}
static int
vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
SUPPORTED_TP;
ecmd->advertising = ADVERTISED_TP;
ecmd->port = PORT_TP;
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed) {
ecmd->speed = adapter->link_speed;
ecmd->duplex = DUPLEX_FULL;
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
return 0;
}
static void
vmxnet3_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
param->rx_mini_max_pending = 0;
param->rx_jumbo_max_pending = 0;
param->rx_pending = adapter->rx_queue.rx_ring[0].size;
param->tx_pending = adapter->tx_queue.tx_ring.size;
param->rx_mini_pending = 0;
param->rx_jumbo_pending = 0;
}
static int
vmxnet3_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 new_tx_ring_size, new_rx_ring_size;
u32 sz;
int err = 0;
if (param->tx_pending == 0 || param->tx_pending >
VMXNET3_TX_RING_MAX_SIZE)
return -EINVAL;
if (param->rx_pending == 0 || param->rx_pending >
VMXNET3_RX_RING_MAX_SIZE)
return -EINVAL;
/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
new_tx_ring_size = min_t(u32, new_tx_ring_size,
VMXNET3_TX_RING_MAX_SIZE);
if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
VMXNET3_RING_SIZE_ALIGN) != 0)
return -EINVAL;
/* ring0 has to be a multiple of
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
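	/*
	 * Round rx_pending up to the next multiple of sz, then cap it at the
	 * largest multiple of sz that does not exceed VMXNET3_RX_RING_MAX_SIZE.
	 */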
new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
new_rx_ring_size = min_t(u32, new_rx_ring_size,
VMXNET3_RX_RING_MAX_SIZE / sz * sz);
if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
sz) != 0)
return -EINVAL;
if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
return 0;
}
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
msleep(1);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
/* recreate the rx queue and the tx queue based on the
* new sizes */
vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
if (err) {
/* failed, most likely because of OOM, try default
* size */
printk(KERN_ERR "%s: failed to apply new sizes, try the"
" default ones\n", netdev->name);
err = vmxnet3_create_queues(adapter,
VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE);
if (err) {
printk(KERN_ERR "%s: failed to create queues "
"with default sizes. Closing it\n",
netdev->name);
goto out;
}
}
err = vmxnet3_activate_dev(adapter);
if (err)
printk(KERN_ERR "%s: failed to re-activate, error %d."
" Closing it\n", netdev->name, err);
}
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
if (err)
vmxnet3_force_close(adapter);
return err;
}
static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
.get_regs = vmxnet3_get_regs,
.get_wol = vmxnet3_get_wol,
.set_wol = vmxnet3_set_wol,
.get_link = ethtool_op_get_link,
.get_rx_csum = vmxnet3_get_rx_csum,
.set_rx_csum = vmxnet3_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_hw_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_strings = vmxnet3_get_strings,
.get_flags = vmxnet3_get_flags,
.set_flags = vmxnet3_set_flags,
.get_sset_count = vmxnet3_get_sset_count,
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
}
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/page.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <asm/checksum.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/dst.h>
#include "vmxnet3_defs.h"
#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01000500
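/* e.g. 0x01000500 corresponds to driver version 1.0.5.0 */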
/*
* Capabilities
*/
enum {
VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */
VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over
* IPv4 */
VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */
VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */
VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */
VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation
* offload */
VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */
VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */
VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */
VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */
VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */
VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */
VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */
VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries
* for a pkt */
VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */
VMNET_CAP_LPD = 0x10000, /* large pkt delivery */
VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span */
					   /* multiple pages for transmits */
VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */
VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */
VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */
/* pkts up to 256kB. */
VMNET_CAP_UPT = 0x400000 /* Support UPT */
};
/*
* PCI vendor and device IDs.
*/
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
#define MAX_ETHERNET_CARDS 10
#define MAX_PCI_PASSTHRU_DEVICE 6
struct vmxnet3_cmd_ring {
union Vmxnet3_GenericDesc *base;
u32 size;
u32 next2fill;
u32 next2comp;
u8 gen;
dma_addr_t basePA;
};
static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
{
ring->next2fill++;
if (unlikely(ring->next2fill == ring->size)) {
ring->next2fill = 0;
VMXNET3_FLIP_RING_GEN(ring->gen);
}
}
static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}
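/*
 * Number of descriptors still available for filling.  One slot is kept
 * unused so that a full ring (next2fill immediately behind next2comp) can
 * be told apart from an empty one (next2fill == next2comp), hence the "- 1".
 */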
static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
{
return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
ring->next2comp - ring->next2fill - 1;
}
struct vmxnet3_comp_ring {
union Vmxnet3_GenericDesc *base;
u32 size;
u32 next2proc;
u8 gen;
u8 intr_idx;
dma_addr_t basePA;
};
static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
{
ring->next2proc++;
if (unlikely(ring->next2proc == ring->size)) {
ring->next2proc = 0;
VMXNET3_FLIP_RING_GEN(ring->gen);
}
}
struct vmxnet3_tx_data_ring {
struct Vmxnet3_TxDataDesc *base;
u32 size;
dma_addr_t basePA;
};
enum vmxnet3_buf_map_type {
VMXNET3_MAP_INVALID = 0,
VMXNET3_MAP_NONE,
VMXNET3_MAP_SINGLE,
VMXNET3_MAP_PAGE,
};
struct vmxnet3_tx_buf_info {
u32 map_type;
u16 len;
u16 sop_idx;
dma_addr_t dma_addr;
struct sk_buff *skb;
};
struct vmxnet3_tq_driver_stats {
u64 drop_total; /* # of pkts dropped by the driver, the
* counters below track droppings due to
* different reasons
*/
u64 drop_too_many_frags;
u64 drop_oversized_hdr;
u64 drop_hdr_inspect_err;
u64 drop_tso;
u64 tx_ring_full;
u64 linearized; /* # of pkts linearized */
u64 copy_skb_header; /* # of times we have to copy skb header */
u64 oversized_hdr;
};
struct vmxnet3_tx_ctx {
bool ipv4;
u16 mss;
u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
* offloading
*/
u32 l4_hdr_size; /* only valid if mss != 0 */
u32 copy_size; /* # of bytes copied into the data ring */
union Vmxnet3_GenericDesc *sop_txd;
union Vmxnet3_GenericDesc *eop_txd;
};
struct vmxnet3_tx_queue {
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
bool stopped;
int num_stop; /* # of times the queue is
* stopped */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
enum vmxnet3_rx_buf_type {
VMXNET3_RX_BUF_NONE = 0,
VMXNET3_RX_BUF_SKB = 1,
VMXNET3_RX_BUF_PAGE = 2
};
struct vmxnet3_rx_buf_info {
enum vmxnet3_rx_buf_type buf_type;
u16 len;
union {
struct sk_buff *skb;
struct page *page;
};
dma_addr_t dma_addr;
};
struct vmxnet3_rx_ctx {
struct sk_buff *skb;
u32 sop_idx;
};
struct vmxnet3_rq_driver_stats {
u64 drop_total;
u64 drop_err;
u64 drop_fcs;
u64 rx_buf_alloc_failure;
};
struct vmxnet3_rx_queue {
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
* update */
struct vmxnet3_rx_buf_info *buf_info[2];
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define VMXNET3_LINUX_MAX_MSIX_VECT 1
struct vmxnet3_intr {
enum vmxnet3_intr_mask_mode mask_mode;
enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */
u8 num_intrs; /* # of intr vectors */
u8 event_intr_idx; /* idx of the intr vector for event */
u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
#ifdef CONFIG_PCI_MSI
struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
#define VMXNET3_STATE_BIT_RESETTING 0
#define VMXNET3_STATE_BIT_QUIESCED 1
struct vmxnet3_adapter {
struct vmxnet3_tx_queue tx_queue;
struct vmxnet3_rx_queue rx_queue;
struct napi_struct napi;
struct vlan_group *vlan_grp;
struct vmxnet3_intr intr;
struct Vmxnet3_DriverShared *shared;
struct Vmxnet3_PMConf *pm_conf;
struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
struct net_device *netdev;
struct pci_dev *pdev;
u8 *hw_addr0; /* for BAR 0 */
u8 *hw_addr1; /* for BAR 1 */
/* feature control */
bool rxcsum;
bool lro;
bool jumbo_frame;
/* rx buffer related */
unsigned skb_buf_size;
int rx_buf_per_pkt; /* only apply to the 1st ring */
dma_addr_t shared_pa;
dma_addr_t queue_desc_pa;
/* Wake-on-LAN */
u32 wol;
/* Link speed */
u32 link_speed; /* in mbps */
u64 tx_timeout_count;
struct work_struct work;
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int dev_number;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg) \
readl((adapter)->hw_addr0 + (reg))
#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg) \
readl((adapter)->hw_addr1 + (reg))
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
((rq)->rx_ring[ring_idx].size >> 3)
#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 256
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter);
void
vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);
extern char vmxnet3_driver_name[];
#endif
@@ -58,8 +58,7 @@ struct cisco_state {
 	spinlock_t lock;
 	unsigned long last_poll;
 	int up;
-	int request_sent;
-	u32 txseq; /* TX sequence number */
+	u32 txseq; /* TX sequence number, 0 = none */
 	u32 rxseq; /* RX sequence number */
 };
@@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb)
 	struct cisco_packet *cisco_data;
 	struct in_device *in_dev;
 	__be32 addr, mask;
+	u32 ack;
 
 	if (skb->len < sizeof(struct hdlc_header))
 		goto rx_error;
@@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb)
 	case CISCO_KEEPALIVE_REQ:
 		spin_lock(&st->lock);
 		st->rxseq = ntohl(cisco_data->par1);
-		if (st->request_sent &&
-		    ntohl(cisco_data->par2) == st->txseq) {
+		ack = ntohl(cisco_data->par2);
+		if (ack && (ack == st->txseq ||
+		    /* our current REQ may be in transit */
+		    ack == st->txseq - 1)) {
 			st->last_poll = jiffies;
 			if (!st->up) {
 				u32 sec, min, hrs, days;
@@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg)
 	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
 			     htonl(st->rxseq));
-	st->request_sent = 1;
 	spin_unlock(&st->lock);
 
 	st->timer.expires = jiffies + st->settings.interval * HZ;
@@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&st->lock, flags);
-	st->up = 0;
-	st->request_sent = 0;
-	st->txseq = st->rxseq = 0;
+	st->up = st->txseq = st->rxseq = 0;
 	spin_unlock_irqrestore(&st->lock, flags);
 
 	init_timer(&st->timer);
@@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev)
 	spin_lock_irqsave(&st->lock, flags);
 	netif_dormant_on(dev);
-	st->up = 0;
-	st->request_sent = 0;
+	st->up = st->txseq = 0;
 	spin_unlock_irqrestore(&st->lock, flags);
 }
......
@@ -690,7 +690,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
 	}
 
 	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+	local_bh_disable();
 	ieee80211_rx(dev->wl->hw, skb);
+	local_bh_enable();
 
 #if B43_DEBUG
 	dev->rx_count++;
......
@@ -3156,8 +3156,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  out_pci_disable_device:
 	pci_disable_device(pdev);
  out_ieee80211_free_hw:
-	ieee80211_free_hw(priv->hw);
 	iwl_free_traffic_mem(priv);
+	ieee80211_free_hw(priv->hw);
  out:
 	return err;
 }
......
@@ -4099,8 +4099,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
 	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
  out_ieee80211_free_hw:
-	ieee80211_free_hw(priv->hw);
 	iwl_free_traffic_mem(priv);
+	ieee80211_free_hw(priv->hw);
  out:
 	return err;
 }
......
@@ -3,6 +3,7 @@
  * responses as well as events generated by firmware.
  */
 #include <linux/delay.h>
+#include <linux/sched.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <asm/unaligned.h>
......
@@ -1669,6 +1669,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
  * to this function and ieee80211_rx_irqsafe() may not be mixed for a
  * single hardware.
  *
+ * Note that right now, this function must be called with softirqs disabled.
+ *
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
  */
......
@@ -226,12 +226,12 @@ struct sock {
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
 	kmemcheck_bitfield_begin(flags);
-	unsigned char		sk_shutdown : 2,
+	unsigned int		sk_shutdown : 2,
 				sk_no_check : 2,
-				sk_userlocks : 4;
+				sk_userlocks : 4,
+				sk_protocol : 8,
+				sk_type : 16;
 	kmemcheck_bitfield_end(flags);
-	unsigned char		sk_protocol;
-	unsigned short		sk_type;
 	int			sk_rcvbuf;
 	socket_lock_t		sk_lock;
 	/*
......
@@ -644,6 +644,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
 	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+		inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
......
@@ -845,6 +845,42 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	return ret;
 }
 
+/**
+ * first_packet_length - return length of first packet in receive queue
+ * @sk: socket
+ *
+ * Drops all bad checksum frames, until a valid one is found.
+ * Returns the length of found skb, or 0 if none is found.
+ */
+static unsigned int first_packet_length(struct sock *sk)
+{
+	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
+	struct sk_buff *skb;
+	unsigned int res;
+
+	__skb_queue_head_init(&list_kill);
+
+	spin_lock_bh(&rcvq->lock);
+	while ((skb = skb_peek(rcvq)) != NULL &&
+		udp_lib_checksum_complete(skb)) {
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+				 IS_UDPLITE(sk));
+		__skb_unlink(skb, rcvq);
+		__skb_queue_tail(&list_kill, skb);
+	}
+	res = skb ? skb->len : 0;
+	spin_unlock_bh(&rcvq->lock);
+
+	if (!skb_queue_empty(&list_kill)) {
+		lock_sock(sk);
+		__skb_queue_purge(&list_kill);
+		sk_mem_reclaim_partial(sk);
+		release_sock(sk);
+	}
+	return res;
+}
+
 /*
  *	IOCTL requests applicable to the UDP protocol
  */
@@ -861,21 +897,16 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 	case SIOCINQ:
 	{
-		struct sk_buff *skb;
-		unsigned long amount;
+		unsigned int amount = first_packet_length(sk);
 
-		amount = 0;
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb != NULL) {
+		if (amount)
 			/*
 			 * We will only return the amount
 			 * of this packet since that is all
 			 * that will be read.
 			 */
-			amount = skb->len - sizeof(struct udphdr);
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
+			amount -= sizeof(struct udphdr);
+
 		return put_user(amount, (int __user *)arg);
 	}
@@ -1544,29 +1575,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	unsigned int mask = datagram_poll(file, sock, wait);
 	struct sock *sk = sock->sk;
-	int is_lite = IS_UDPLITE(sk);
 
 	/* Check for false positives due to checksum errors */
-	if ((mask & POLLRDNORM) &&
-	    !(file->f_flags & O_NONBLOCK) &&
-	    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
-		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
-		struct sk_buff *skb;
-
-		spin_lock_bh(&rcvq->lock);
-		while ((skb = skb_peek(rcvq)) != NULL &&
-		       udp_lib_checksum_complete(skb)) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_INERRORS, is_lite);
-			__skb_unlink(skb, rcvq);
-			kfree_skb(skb);
-		}
-		spin_unlock_bh(&rcvq->lock);
-
-		/* nothing to see, move along */
-		if (skb == NULL)
-			mask &= ~(POLLIN | POLLRDNORM);
-	}
+	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
+	    !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
+		mask &= ~(POLLIN | POLLRDNORM);
 
 	return mask;
......
@@ -544,7 +544,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 		       "%pM\n", bss->cbss.bssid, ifibss->bssid);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-	if (bss && memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
+	if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
 		printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
 		       " based on configured SSID\n",
 		       sdata->dev->name, bss->cbss.bssid);
@@ -829,7 +829,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
 		if (!sdata->u.ibss.ssid_len)
 			continue;
 		sdata->u.ibss.last_scan_completed = jiffies;
-		ieee80211_sta_find_ibss(sdata);
+		mod_timer(&sdata->u.ibss.timer, 0);
 	}
 	mutex_unlock(&local->iflist_mtx);
 }
......
@@ -2453,6 +2453,8 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
+	WARN_ON_ONCE(softirq_count() == 0);
+
 	if (WARN_ON(status->band < 0 ||
 		    status->band >= IEEE80211_NUM_BANDS))
 		goto drop;
......
@@ -34,7 +34,7 @@ static struct tcf_hashinfo pedit_hash_info = {
 };
 
 static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
-	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tcf_pedit) },
+	[TCA_PEDIT_PARMS]	= { .len = sizeof(struct tc_pedit) },
 };
 
 static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
......