Commit 925068dc authored by David S. Miller

Merge branch 'davem-next' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

parents 83aa2e96 67fbbe15
...@@ -1694,26 +1694,6 @@ config VIA_RHINE_MMIO
If unsure, say Y.
config VIA_RHINE_NAPI
bool "Use Rx Polling (NAPI)"
depends on VIA_RHINE
help
NAPI is a new driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card.
If your estimated Rx load is 10kpps or more, or if the card will be
deployed on potentially unfriendly networks (e.g. in a firewall),
then say Y here.
config LAN_SAA9730
bool "Philips SAA9730 Ethernet support"
depends on NET_PCI && PCI && MIPS_ATLAS
help
The SAA9730 is a combined multimedia and peripheral controller used
in thin clients, Internet access terminals, and diskless
workstations.
See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
config SC92031
tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
depends on NET_PCI && PCI && EXPERIMENTAL
...@@ -2029,6 +2009,15 @@ config IGB
To compile this driver as a module, choose M here. The module
will be called igb.
config IGB_LRO
bool "Use software LRO"
depends on IGB && INET
select INET_LRO
---help---
Say Y here if you want to use large receive offload.
If in doubt, say N.
source "drivers/net/ixp2000/Kconfig" source "drivers/net/ixp2000/Kconfig"
config MYRI_SBUS config MYRI_SBUS
...@@ -2273,10 +2262,6 @@ config GIANFAR ...@@ -2273,10 +2262,6 @@ config GIANFAR
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540. and MPC86xx family of chips, and the FEC on the 8540.
config GFAR_NAPI
bool "Use Rx Polling (NAPI)"
depends on GIANFAR
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE
...@@ -2285,10 +2270,6 @@ config UCC_GETH
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
config UGETH_NAPI
bool "Use Rx Polling (NAPI)"
depends on UCC_GETH
config UGETH_MAGIC_PACKET
bool "Magic Packet detection support"
depends on UCC_GETH
...@@ -2378,14 +2359,6 @@ config CHELSIO_T1_1G
Enables support for Chelsio's gigabit Ethernet PCI cards. If you
are using only 10G cards say 'N' here.
config CHELSIO_T1_NAPI
bool "Use Rx Polling (NAPI)"
depends on CHELSIO_T1
default y
help
NAPI is a driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card.
config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on PCI && INET
...@@ -2457,20 +2430,6 @@ config IXGB
To compile this driver as a module, choose M here. The module
will be called ixgb.
config IXGB_NAPI
bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
depends on IXGB && EXPERIMENTAL
help
NAPI is a new driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card. It is
still somewhat experimental and thus not yet enabled by default.
If your estimated Rx load is 10kpps or more, or if the card will be
deployed on potentially unfriendly networks (e.g. in a firewall),
then say Y here.
If in doubt, say N.
config S2IO
tristate "S2IO 10Gbe XFrame NIC"
depends on PCI
...
...@@ -166,7 +166,6 @@ obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
obj-$(CONFIG_8139CP) += 8139cp.o
obj-$(CONFIG_8139TOO) += 8139too.o
obj-$(CONFIG_ZNET) += znet.o
obj-$(CONFIG_LAN_SAA9730) += saa9730.o
obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_EWRK3) += ewrk3.o
...
...@@ -1153,9 +1153,7 @@ static int __devinit init_one(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = t1_netpoll;
#endif
#ifdef CONFIG_CHELSIO_T1_NAPI
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
#endif
SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
}
...
...@@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
st->vlan_xtract++;
#ifdef CONFIG_CHELSIO_T1_NAPI
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
ntohs(p->vlan));
#else
} else
vlan_hwaccel_rx(skb, adapter->vlan_grp,
ntohs(p->vlan));
#endif
} else {
#ifdef CONFIG_CHELSIO_T1_NAPI
netif_receive_skb(skb);
#else
netif_rx(skb);
#endif
}
}
/*
...@@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter)
return (e->GenerationBit == Q->genbit);
}
#ifdef CONFIG_CHELSIO_T1_NAPI
/*
* A simpler version of process_responses() that handles only pure (i.e.,
* non data-carrying) responses. Such respones are too light-weight to justify
...@@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget)
return work_done;
}
/*
* NAPI version of the main interrupt handler.
*/
irqreturn_t t1_interrupt(int irq, void *data)
{
struct adapter *adapter = data;
...@@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data)
else {
/* no data, no NAPI needed */
writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
napi_enable(&adapter->napi); /* undo schedule_prep */
/* undo schedule_prep */
napi_enable(&adapter->napi);
}
}
return IRQ_HANDLED;
...@@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
return IRQ_RETVAL(handled != 0);
}
#else
/*
* Main interrupt handler, optimized assuming that we took a 'DATA'
* interrupt.
*
* 1. Clear the interrupt
* 2. Loop while we find valid descriptors and process them; accumulate
* information that can be processed after the loop
* 3. Tell the SGE at which index we stopped processing descriptors
* 4. Bookkeeping; free TX buffers, ring doorbell if there are any
* outstanding TX buffers waiting, replenish RX buffers, potentially
* reenable upper layers if they were turned off due to lack of TX
* resources which are available again.
* 5. If we took an interrupt, but no valid respQ descriptors was found we
* let the slow_intr_handler run and do error handling.
*/
irqreturn_t t1_interrupt(int irq, void *cookie)
{
int work_done;
struct adapter *adapter = cookie;
struct respQ *Q = &adapter->sge->respQ;
spin_lock(&adapter->async_lock);
writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
if (likely(responses_pending(adapter)))
work_done = process_responses(adapter, -1);
else
work_done = t1_slow_intr_handler(adapter);
/*
* The unconditional clearing of the PL_CAUSE above may have raced
* with DMA completion and the corresponding generation of a response
* to cause us to miss the resulting data interrupt. The next write
* is also unconditional to recover the missed interrupt and render
* this race harmless.
*/
writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
if (!work_done)
adapter->sge->stats.unhandled_irqs++;
spin_unlock(&adapter->async_lock);
return IRQ_RETVAL(work_done != 0);
}
#endif
/*
* Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
*
...
...@@ -110,10 +110,7 @@ struct ulp_iscsi_info {
unsigned int llimit;
unsigned int ulimit;
unsigned int tagmask;
unsigned int pgsz3;
u8 pgsz_factor[4];
unsigned int pgsz2;
unsigned int pgsz1;
unsigned int pgsz0;
unsigned int max_rxsz;
unsigned int max_txsz;
struct pci_dev *pdev;
...
...@@ -207,6 +207,17 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
break;
case ULP_ISCSI_SET_PARAMS:
t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
/* set MaxRxData and MaxCoalesceSize to 16224 */
t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60);
/* program the ddp page sizes */
{
int i;
unsigned int val = 0;
for (i = 0; i < 4; i++)
val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
if (val)
t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
}
break;
default:
ret = -EOPNOTSUPP;
...
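The ULP_ISCSI_SET_PARAMS hunk above packs one 4-bit DDP page-size factor per byte of the value written to A_ULPRX_ISCSI_PSZ. A minimal standalone sketch of that packing, using purely illustrative factor values that are not taken from this commit:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 4-bit page-size factors, one per DDP page size. */
	unsigned char pgsz_factor[4] = { 0, 1, 2, 4 };
	unsigned int val = 0;
	int i;

	/* Same packing as the loop in cxgb_ulp_iscsi_ctl() above. */
	for (i = 0; i < 4; i++)
		val |= (pgsz_factor[i] & 0xF) << (8 * i);

	printf("0x%08X\n", val);	/* prints 0x04020100 for these factors */
	return 0;
}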
...@@ -1517,16 +1517,18 @@
#define A_ULPRX_ISCSI_TAGMASK 0x514
#define S_HPZ0 0
#define A_ULPRX_ISCSI_PSZ 0x518
#define M_HPZ0 0xf
#define V_HPZ0(x) ((x) << S_HPZ0)
#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
#define A_ULPRX_TDDP_LLIMIT 0x51c
#define A_ULPRX_TDDP_ULIMIT 0x520
#define A_ULPRX_TDDP_PSZ 0x528
#define S_HPZ0 0
#define M_HPZ0 0xf
#define V_HPZ0(x) ((x) << S_HPZ0)
#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
#define A_ULPRX_STAG_LLIMIT 0x52c
#define A_ULPRX_STAG_ULIMIT 0x530
...
...@@ -191,6 +191,9 @@ union opcode_tid {
#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
#define G_TID(x) ((x) & 0xFFFFFF)
#define S_QNUM 0
#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF)
#define S_HASHTYPE 22
#define M_HASHTYPE 0x3
#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE)
...@@ -779,6 +782,12 @@ struct tx_data_wr {
__be32 param;
};
/* tx_data_wr.flags fields */
#define S_TX_ACK_PAGES 21
#define M_TX_ACK_PAGES 0x7
#define V_TX_ACK_PAGES(x) ((x) << S_TX_ACK_PAGES)
#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES)
/* tx_data_wr.param fields */
#define S_TX_PORT 0
#define M_TX_PORT 0x7
...@@ -1452,4 +1461,35 @@ struct cpl_rdma_terminate {
#define M_TERM_TID 0xFFFFF
#define V_TERM_TID(x) ((x) << S_TERM_TID)
#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
/* ULP_TX opcodes */
enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };
#define S_ULPTX_CMD 28
#define M_ULPTX_CMD 0xF
#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
#define S_ULPTX_NFLITS 0
#define M_ULPTX_NFLITS 0xFF
#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS)
struct ulp_mem_io {
WR_HDR;
__be32 cmd_lock_addr;
__be32 len;
};
/* ulp_mem_io.cmd_lock_addr fields */
#define S_ULP_MEMIO_ADDR 0
#define M_ULP_MEMIO_ADDR 0x7FFFFFF
#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
#define S_ULP_MEMIO_LOCK 27
#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U)
/* ulp_mem_io.len fields */
#define S_ULP_MEMIO_DATA_LEN 28
#define M_ULP_MEMIO_DATA_LEN 0xF
#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
#endif /* T3_CPL_H */
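The new ULP_TX definitions above are shift/mask helpers for building the ulp_mem_io command word. A rough sketch of how such a word could be assembled from those macros; the address value is a placeholder, and whether offload code builds the word exactly this way is not shown in this diff:

#include <stdio.h>

/* Copies of the helpers added above, so the sketch compiles on its own. */
#define S_ULPTX_CMD         28
#define V_ULPTX_CMD(x)      ((x) << S_ULPTX_CMD)
#define S_ULP_MEMIO_ADDR    0
#define M_ULP_MEMIO_ADDR    0x7FFFFFF
#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR)
#define S_ULP_MEMIO_LOCK    27
#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK)
#define F_ULP_MEMIO_LOCK    V_ULP_MEMIO_LOCK(1U)

enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 };

int main(void)
{
	unsigned int addr = 0x100;	/* placeholder memory address */
	unsigned int cmd_lock_addr = V_ULPTX_CMD(ULP_MEM_WRITE) |
				     F_ULP_MEMIO_LOCK |
				     V_ULP_MEMIO_ADDR(addr & M_ULP_MEMIO_ADDR);

	printf("0x%08X\n", cmd_lock_addr);	/* 0x38000100 for this address */
	return 0;
}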
...@@ -64,6 +64,7 @@ struct t3cdev {
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
void *ulp_iscsi; /* ulp iscsi */
};
#endif /* _T3CDEV_H_ */
...@@ -44,8 +44,7 @@
* happen immediately, but will wait until either a set number
* of frames or amount of time have passed). In NAPI, the
* interrupt handler will signal there is work to be done, and
* exit. Without NAPI, the packet(s) will be handled
* exit. This method will start at the last known empty
* immediately. Both methods will start at the last known empty
* descriptor, and process every subsequent descriptor until there
* are none left with data (NAPI will stop after a set number of
* packets to give time to other tasks, but will eventually
...@@ -101,12 +100,6 @@
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS
#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif
const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";
...@@ -131,9 +124,7 @@ static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
...@@ -260,9 +251,7 @@ static int gfar_probe(struct platform_device *pdev)
dev->hard_start_xmit = gfar_start_xmit;
dev->tx_timeout = gfar_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = gfar_netpoll;
#endif
...@@ -363,11 +352,7 @@ static int gfar_probe(struct platform_device *pdev)
/* Even more device info helps when determining which kernel */
/* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
dev->name, priv->rx_ring_size, priv->tx_ring_size); dev->name, priv->rx_ring_size, priv->tx_ring_size);
...@@ -945,14 +930,10 @@ int startup_gfar(struct net_device *dev) ...@@ -945,14 +930,10 @@ int startup_gfar(struct net_device *dev)
/* Returns 0 for success. */ /* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev) static int gfar_enet_open(struct net_device *dev)
{ {
#ifdef CONFIG_GFAR_NAPI
struct gfar_private *priv = netdev_priv(dev);
#endif
int err;
#ifdef CONFIG_GFAR_NAPI
napi_enable(&priv->napi);
#endif
/* Initialize a bunch of registers */
init_registers(dev);
...@@ -962,17 +943,13 @@ static int gfar_enet_open(struct net_device *dev)
err = init_phy(dev);
if(err) {
#ifdef CONFIG_GFAR_NAPI
napi_disable(&priv->napi);
#endif
return err;
}
err = startup_gfar(dev);
if (err) {
#ifdef CONFIG_GFAR_NAPI
napi_disable(&priv->napi);
#endif
return err;
}
...@@ -1128,9 +1105,7 @@ static int gfar_close(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
napi_disable(&priv->napi);
#endif
stop_gfar(dev);
...@@ -1427,14 +1402,9 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
u32 tempval;
#else
unsigned long flags;
#endif
/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived */
gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
...@@ -1451,38 +1421,10 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
dev->name, gfar_read(&priv->regs->ievent),
gfar_read(&priv->regs->imask));
}
#else
/* Clear IEVENT, so rx interrupt isn't called again
* because of this interrupt */
gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
spin_lock_irqsave(&priv->rxlock, flags);
gfar_clean_rx_ring(dev, priv->rx_ring_size);
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
if (likely(priv->rxcoalescing)) {
gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
}
spin_unlock_irqrestore(&priv->rxlock, flags);
#endif
return IRQ_HANDLED;
}
static inline int gfar_rx_vlan(struct sk_buff *skb,
struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
...@@ -1539,10 +1481,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
/* Send the packet up the stack */
if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
else
ret = RECEIVE(skb);
if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
fcb->vlctl);
} else
ret = netif_receive_skb(skb);
if (NET_RX_DROP == ret)
priv->extra_stats.kernel_dropped++;
...@@ -1629,7 +1572,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
return howmany;
}
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget)
{
struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
...@@ -1664,7 +1606,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
return howmany;
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
...@@ -2003,11 +1944,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
gfar_receive(irq, dev_id);
#ifndef CONFIG_GFAR_NAPI
/* Clear the halt bit in RSTAT */
gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif
if (netif_msg_rx_err(priv))
printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
dev->name, gfar_read(&priv->regs->rstat));
...
...@@ -77,13 +77,8 @@ extern const char gfar_driver_name[];
extern const char gfar_driver_version[];
/* These need to be powers of 2 for this driver */
#ifdef CONFIG_GFAR_NAPI
#define DEFAULT_TX_RING_SIZE 256
#define DEFAULT_RX_RING_SIZE 256
#else
#define DEFAULT_TX_RING_SIZE 64
#define DEFAULT_RX_RING_SIZE 64
#endif
#define GFAR_RX_MAX_RING_SIZE 256
#define GFAR_TX_MAX_RING_SIZE 256
...@@ -128,14 +123,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_RXTIME 21
/* Non NAPI Case */
#ifndef CONFIG_GFAR_NAPI
#define DEFAULT_RX_COALESCE 1
#define DEFAULT_RXCOUNT 16
#else
#define DEFAULT_RX_COALESCE 0
#define DEFAULT_RXCOUNT 0
#endif /* CONFIG_GFAR_NAPI */
#define MIIMCFG_INIT_VALUE 0x00000007
#define MIIMCFG_RESET 0x80000000
...
...@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include "e1000_mac.h" #include "e1000_mac.h"
#include "e1000_82575.h" #include "e1000_82575.h"
...@@ -45,7 +46,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *); ...@@ -45,7 +46,6 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *);
static s32 igb_init_hw_82575(struct e1000_hw *); static s32 igb_init_hw_82575(struct e1000_hw *);
static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
static void igb_rar_set_82575(struct e1000_hw *, u8 *, u32);
static s32 igb_reset_hw_82575(struct e1000_hw *);
static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
static s32 igb_setup_copper_link_82575(struct e1000_hw *);
...@@ -84,6 +84,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_82575GB_QUAD_COPPER:
mac->type = e1000_82575;
break;
case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
mac->type = e1000_82576;
break;
default:
return -E1000_ERR_MAC_INIT;
break;
...@@ -128,6 +134,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
if (mac->type == e1000_82576)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = true;
/* Set if manageability features are enabled. */
...@@ -700,7 +708,6 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
return ret_val;
}
/*
* igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
* @hw: pointer to the HW structure
...@@ -757,18 +764,129 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
}
/**
* igb_rar_set_82575 - Set receive address register
* igb_init_rx_addrs_82575 - Initialize receive address's
* @hw: pointer to the HW structure
* @rar_count: receive address registers
*
* Setups the receive address registers by setting the base receive address
* register to the devices MAC address and clearing all the other receive
* address registers to 0.
**/
static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
u8 addr[6] = {0,0,0,0,0,0};
/*
* This function is essentially the same as that of
* e1000_init_rx_addrs_generic. However it also takes care
* of the special case where the register offset of the
* second set of RARs begins elsewhere. This is implicitly taken care by
* function e1000_rar_set_generic.
*/
hw_dbg("e1000_init_rx_addrs_82575");
/* Setup the receive address */
hw_dbg("Programming MAC Address into RAR[0]\n");
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
for (i = 1; i < rar_count; i++)
hw->mac.ops.rar_set(hw, addr, i);
}
/**
* igb_update_mc_addr_list_82575 - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
u32 hash_value;
u32 i;
u8 addr[6] = {0,0,0,0,0,0};
/*
* This function is essentially the same as that of
* igb_update_mc_addr_list_generic. However it also takes care
* of the special case where the register offset of the
* second set of RARs begins elsewhere. This is implicitly taken care by
* function e1000_rar_set_generic.
*/
/*
* Load the first set of multicast addresses into the exact
* filters (RAR). If there are not enough to fill the RAR
* array, clear the filters.
*/
for (i = rar_used_count; i < rar_count; i++) {
if (mc_addr_count) {
igb_rar_set(hw, mc_addr_list, i);
mc_addr_count--;
mc_addr_list += ETH_ALEN;
} else {
igb_rar_set(hw, addr, i);
}
}
/* Clear the old settings from the MTA */
hw_dbg("Clearing MTA\n");
for (i = 0; i < hw->mac.mta_reg_count; i++) {
array_wr32(E1000_MTA, i, 0);
wrfl();
}
/* Load any remaining multicast addresses into the hash table. */
for (; mc_addr_count > 0; mc_addr_count--) {
hash_value = igb_hash_mc_addr(hw, mc_addr_list);
hw_dbg("Hash value = 0x%03X\n", hash_value);
hw->mac.ops.mta_set(hw, hash_value);
mc_addr_list += ETH_ALEN;
}
}
/**
* igb_shutdown_fiber_serdes_link_82575 - Remove link during power down
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
*
* Sets the receive address array register at index to the address passed
* in by addr.
* In the case of fiber serdes, shut down optics and PCS on driver unload
* when management pass thru is not enabled.
**/
static void igb_rar_set_82575(struct e1000_hw *hw, u8 *addr, u32 index)
void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
{
if (index < E1000_RAR_ENTRIES_82575)
u32 reg;
igb_rar_set(hw, addr, index);
if (hw->mac.type != e1000_82576 ||
(hw->phy.media_type != e1000_media_type_fiber &&
hw->phy.media_type != e1000_media_type_internal_serdes))
return;
/* if the management interface is not enabled, then power down */
if (!igb_enable_mng_pass_thru(hw)) {
/* Disable PCS to turn off link */
reg = rd32(E1000_PCS_CFG0);
reg &= ~E1000_PCS_CFG_PCS_EN;
wr32(E1000_PCS_CFG0, reg);
/* shutdown the laser */
reg = rd32(E1000_CTRL_EXT);
reg |= E1000_CTRL_EXT_SDP7_DATA;
wr32(E1000_CTRL_EXT, reg);
/* flush the write to verify completion */
wrfl();
msleep(1);
}
return;
}
...@@ -854,7 +972,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
igb_clear_vfta(hw);
/* Setup the receive address */
igb_init_rx_addrs(hw, rar_count);
igb_init_rx_addrs_82575(hw, rar_count);
/* Zero out the Multicast HASH table */
hw_dbg("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
...@@ -1113,6 +1231,70 @@ static bool igb_sgmii_active_82575(struct e1000_hw *hw)
return ret_val;
}
/**
* igb_translate_register_82576 - Translate the proper register offset
* @reg: e1000 register to be read
*
* Registers in 82576 are located in different offsets than other adapters
* even though they function in the same manner. This function takes in
* the name of the register to read and returns the correct offset for
* 82576 silicon.
**/
u32 igb_translate_register_82576(u32 reg)
{
/*
* Some of the Kawela registers are located at different
* offsets than they are in older adapters.
* Despite the difference in location, the registers
* function in the same manner.
*/
switch (reg) {
case E1000_TDBAL(0):
reg = 0x0E000;
break;
case E1000_TDBAH(0):
reg = 0x0E004;
break;
case E1000_TDLEN(0):
reg = 0x0E008;
break;
case E1000_TDH(0):
reg = 0x0E010;
break;
case E1000_TDT(0):
reg = 0x0E018;
break;
case E1000_TXDCTL(0):
reg = 0x0E028;
break;
case E1000_RDBAL(0):
reg = 0x0C000;
break;
case E1000_RDBAH(0):
reg = 0x0C004;
break;
case E1000_RDLEN(0):
reg = 0x0C008;
break;
case E1000_RDH(0):
reg = 0x0C010;
break;
case E1000_RDT(0):
reg = 0x0C018;
break;
case E1000_RXDCTL(0):
reg = 0x0C028;
break;
case E1000_SRRCTL(0):
reg = 0x0C00C;
break;
default:
break;
}
return reg;
}
/** /**
* igb_reset_init_script_82575 - Inits HW defaults after reset * igb_reset_init_script_82575 - Inits HW defaults after reset
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
...@@ -1304,7 +1486,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { ...@@ -1304,7 +1486,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
.reset_hw = igb_reset_hw_82575, .reset_hw = igb_reset_hw_82575,
.init_hw = igb_init_hw_82575, .init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575, .check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set_82575, .rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575, .read_mac_addr = igb_read_mac_addr_82575,
.get_speed_and_duplex = igb_get_speed_and_duplex_copper, .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
}; };
......
...@@ -28,9 +28,13 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
u32 igb_translate_register_82576(u32 reg);
void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
/* SRRCTL bit definitions */
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
...@@ -95,6 +99,8 @@ union e1000_adv_rx_desc {
/* RSS Hash results */
/* RSS Packet Types as indicated in the receive descriptor */
#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
...@@ -144,9 +150,25 @@ struct e1000_adv_tx_context_desc {
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
/* Direct Cache Access (DCA) definitions */
#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
#endif
...@@ -90,6 +90,11 @@
#define E1000_I2CCMD_ERROR 0x80000000
#define E1000_MAX_SGMII_PHY_REG_ADDR 255
#define E1000_I2CCMD_PHY_TIMEOUT 200
#define E1000_IVAR_VALID 0x80
#define E1000_GPIE_NSICR 0x00000001
#define E1000_GPIE_MSIX_MODE 0x00000010
#define E1000_GPIE_EIAME 0x40000000
#define E1000_GPIE_PBA 0x80000000
/* Receive Descriptor bit definitions */
#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
...@@ -213,6 +218,7 @@
/* Device Control */
#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
...@@ -244,6 +250,7 @@
*/
#define E1000_CONNSW_ENRGSRC 0x4
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
#define E1000_PCS_LCTL_FSV_1000 4
...@@ -253,6 +260,7 @@
#define E1000_PCS_LCTL_AN_ENABLE 0x10000
#define E1000_PCS_LCTL_AN_RESTART 0x20000
#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
#define E1000_PCS_LSTS_LINK_OK 1
#define E1000_PCS_LSTS_SPEED_100 2
...@@ -360,6 +368,7 @@
#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_24K 0x0018
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */
#define IFS_MAX 80
#define IFS_MIN 40
...@@ -528,6 +537,7 @@
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
...
...@@ -38,6 +38,10 @@
struct e1000_hw;
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
...@@ -50,6 +54,7 @@ struct e1000_hw;
enum e1000_mac_type {
e1000_undefined = 0,
e1000_82575,
e1000_82576,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
...@@ -410,14 +415,17 @@ struct e1000_mac_operations {
s32 (*check_for_link)(struct e1000_hw *);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
void (*mta_set)(struct e1000_hw *, u32);
};
struct e1000_phy_operations {
s32 (*acquire_phy)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
...
...@@ -36,7 +36,6 @@
static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
/** /**
* igb_remove_device - Free device specific structure * igb_remove_device - Free device specific structure
...@@ -360,7 +359,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, ...@@ -360,7 +359,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
* the multicast filter table array address and new table value. See * the multicast filter table array address and new table value. See
* igb_mta_set() * igb_mta_set()
**/ **/
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{ {
u32 hash_value, hash_mask; u32 hash_value, hash_mask;
u8 bit_shift = 0; u8 bit_shift = 0;
......
...@@ -94,5 +94,6 @@ enum e1000_mng_mode {
#define E1000_HICR_C 0x02
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
#endif
...@@ -56,6 +56,9 @@
#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define E1000_TCTL 0x00400 /* TX Control - RW */
#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
...@@ -217,6 +220,7 @@
#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
#define E1000_RA 0x05400 /* Receive Address - RW Array */
#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
...@@ -235,6 +239,8 @@
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
#define E1000_HICR 0x08F00 /* Host Inteface Control */
/* RSS registers */
...@@ -256,7 +262,8 @@
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
#define E1000_REGISTER(a, reg) reg
#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \
? reg : e1000_translate_register_82576(reg))
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
...
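The redefined E1000_REGISTER() above leaves offsets untouched on pre-82576 MACs and routes them through the 82576 translation helper otherwise. A toy, userspace-only model of that dispatch; the legacy offset below is a made-up placeholder, while 0x0C018 is the 82576 RDT(0) offset taken from the switch in igb_translate_register_82576:

#include <stdio.h>

enum mac_type { MAC_82575, MAC_82576 };

/* Stand-in for igb_translate_register_82576(): one made-up case only. */
static unsigned int translate_82576(unsigned int reg)
{
	return (reg == 0x02818 /* placeholder legacy RDT(0) offset */) ? 0x0C018 : reg;
}

static unsigned int e1000_register(enum mac_type type, unsigned int reg)
{
	/* Mirrors the conditional in the E1000_REGISTER() macro above. */
	return (type < MAC_82576) ? reg : translate_82576(reg);
}

int main(void)
{
	printf("82575: 0x%05X\n", e1000_register(MAC_82575, 0x02818));
	printf("82576: 0x%05X\n", e1000_register(MAC_82576, 0x02818));
	return 0;
}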
...@@ -36,12 +36,20 @@
struct igb_adapter;
#ifdef CONFIG_IGB_LRO
#include <linux/inet_lro.h>
#define MAX_LRO_AGGR 32
#define MAX_LRO_DESCRIPTORS 8
#endif
/* Interrupt defines */
#define IGB_MAX_TX_CLEAN 72
#define IGB_MIN_DYN_ITR 3000
#define IGB_MAX_DYN_ITR 96000
#define IGB_START_ITR 6000
/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
#define IGB_START_ITR 648
#define IGB_DYN_ITR_PACKET_THRESHOLD 2
#define IGB_DYN_ITR_LENGTH_LOW 200
...@@ -62,6 +70,7 @@ struct igb_adapter;
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 4
#define IGB_MAX_TX_QUEUES 4
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
...@@ -124,6 +133,7 @@ struct igb_buffer {
struct {
struct page *page;
u64 page_dma;
unsigned int page_offset;
};
};
};
...@@ -157,18 +167,19 @@ struct igb_ring {
union {
/* TX */
struct {
spinlock_t tx_clean_lock;
struct igb_queue_stats tx_stats;
spinlock_t tx_lock;
bool detect_tx_hung;
};
/* RX */
struct {
/* arrays of page information for packet split */
struct sk_buff *pending_skb;
int pending_skb_page;
int no_itr_adjust;
struct igb_queue_stats rx_stats;
struct napi_struct napi;
int set_itr;
struct igb_ring *buddy;
#ifdef CONFIG_IGB_LRO
struct net_lro_mgr lro_mgr;
bool lro_used;
#endif
};
};
...@@ -211,7 +222,6 @@ struct igb_adapter {
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
int set_itr;
struct work_struct reset_task;
struct work_struct watchdog_task;
...@@ -270,15 +280,32 @@ struct igb_adapter {
/* to not mess up cache alignment, always add to the bottom */
unsigned long state;
unsigned int msi_enabled;
unsigned int flags;
u32 eeprom_wol;
/* for ioport free */
int bars;
int need_ioport;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
#ifdef CONFIG_IGB_LRO
unsigned int lro_max_aggr;
unsigned int lro_aggregated;
unsigned int lro_flushed;
unsigned int lro_no_desc;
#endif
}; };
#define IGB_FLAG_HAS_MSI (1 << 0)
#define IGB_FLAG_MSI_ENABLE (1 << 1)
#define IGB_FLAG_HAS_DCA (1 << 2)
#define IGB_FLAG_DCA_ENABLED (1 << 3)
#define IGB_FLAG_IN_NETPOLL (1 << 5)
#define IGB_FLAG_QUAD_PORT_A (1 << 6)
#define IGB_FLAG_NEED_CTX_IDX (1 << 7)
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
...
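The reworked IGB_START_ITR above encodes 6000 interrupts/s in the adapter's 1024 ns interrupt-throttle units, left-shifted by 2, exactly as the comment in the igb.h hunk states. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* (1000000000 ns/s / (6000 ints/s * 1024 ns)) << 2, per the igb.h comment */
	unsigned int itr = (1000000000u / (6000u * 1024u)) << 2;

	printf("%u\n", itr);	/* prints 648, the new IGB_START_ITR value */
	return 0;
}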
################################################################################
#
# Intel PRO/10GbE Linux driver
# Copyright(c) 1999 - 2006 Intel Corporation.
# Copyright(c) 1999 - 2008 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
...
/*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
...@@ -94,10 +94,8 @@ struct ixgb_adapter;
#define MIN_TXD 64
/* hardware cannot reliably support more than 512 descriptors owned by
* hardware descrioptor cache otherwise an unreliable ring under heavy
* recieve load may result */
* hardware descriptor cache otherwise an unreliable ring under heavy
* receive load may result */
/* #define DEFAULT_RXD 1024 */
/* #define MAX_RXD 4096 */
#define DEFAULT_RXD 512
#define MAX_RXD 512
#define MIN_RXD 64
...@@ -157,7 +155,6 @@ struct ixgb_adapter {
u32 part_num;
u16 link_speed;
u16 link_duplex;
spinlock_t tx_lock;
struct work_struct tx_timeout_task;
struct timer_list blink_timer;
...
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, ...@@ -108,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
*/ */
eecd_reg &= ~IXGB_EECD_DI; eecd_reg &= ~IXGB_EECD_DI;
if(data & mask) if (data & mask)
eecd_reg |= IXGB_EECD_DI; eecd_reg |= IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_REG(hw, EECD, eecd_reg);
...@@ -120,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, ...@@ -120,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
mask = mask >> 1; mask = mask >> 1;
} while(mask); } while (mask);
/* We leave the "DI" bit set to "0" when we leave this routine. */ /* We leave the "DI" bit set to "0" when we leave this routine. */
eecd_reg &= ~IXGB_EECD_DI; eecd_reg &= ~IXGB_EECD_DI;
...@@ -152,14 +152,14 @@ ixgb_shift_in_bits(struct ixgb_hw *hw) ...@@ -152,14 +152,14 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
data = 0; data = 0;
for(i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
data = data << 1; data = data << 1;
ixgb_raise_clock(hw, &eecd_reg); ixgb_raise_clock(hw, &eecd_reg);
eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg = IXGB_READ_REG(hw, EECD);
eecd_reg &= ~(IXGB_EECD_DI); eecd_reg &= ~(IXGB_EECD_DI);
if(eecd_reg & IXGB_EECD_DO) if (eecd_reg & IXGB_EECD_DO)
data |= 1; data |= 1;
ixgb_lower_clock(hw, &eecd_reg); ixgb_lower_clock(hw, &eecd_reg);
...@@ -205,7 +205,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw) ...@@ -205,7 +205,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg = IXGB_READ_REG(hw, EECD);
/* Deselct EEPROM */ /* Deselect EEPROM */
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
IXGB_WRITE_REG(hw, EECD, eecd_reg); IXGB_WRITE_REG(hw, EECD, eecd_reg);
udelay(50); udelay(50);
...@@ -293,14 +293,14 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw) ...@@ -293,14 +293,14 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
*/ */
ixgb_standby_eeprom(hw); ixgb_standby_eeprom(hw);
/* Now read DO repeatedly until is high (equal to '1'). The EEEPROM will /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will
* signal that the command has been completed by raising the DO signal. * signal that the command has been completed by raising the DO signal.
* If DO does not go high in 10 milliseconds, then error out. * If DO does not go high in 10 milliseconds, then error out.
*/ */
for(i = 0; i < 200; i++) { for (i = 0; i < 200; i++) {
eecd_reg = IXGB_READ_REG(hw, EECD); eecd_reg = IXGB_READ_REG(hw, EECD);
if(eecd_reg & IXGB_EECD_DO) if (eecd_reg & IXGB_EECD_DO)
return (true); return (true);
udelay(50); udelay(50);
...@@ -328,10 +328,10 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) ...@@ -328,10 +328,10 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
u16 checksum = 0; u16 checksum = 0;
u16 i; u16 i;
for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
checksum += ixgb_read_eeprom(hw, i); checksum += ixgb_read_eeprom(hw, i);
if(checksum == (u16) EEPROM_SUM) if (checksum == (u16) EEPROM_SUM)
return (true); return (true);
else else
return (false); return (false);
...@@ -351,7 +351,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw) ...@@ -351,7 +351,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
u16 checksum = 0; u16 checksum = 0;
u16 i; u16 i;
for(i = 0; i < EEPROM_CHECKSUM_REG; i++) for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
checksum += ixgb_read_eeprom(hw, i); checksum += ixgb_read_eeprom(hw, i);
checksum = (u16) EEPROM_SUM - checksum; checksum = (u16) EEPROM_SUM - checksum;
...@@ -365,7 +365,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw) ...@@ -365,7 +365,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
* *
* hw - Struct containing variables accessed by shared code * hw - Struct containing variables accessed by shared code
* reg - offset within the EEPROM to be written to * reg - offset within the EEPROM to be written to
* data - 16 bit word to be writen to the EEPROM * data - 16 bit word to be written to the EEPROM
* *
* If ixgb_update_eeprom_checksum is not called after this function, the * If ixgb_update_eeprom_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum. * EEPROM will most likely contain an invalid checksum.
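The note above about the checksum suggests the usual calling pattern. A minimal sketch (a hypothetical helper, not part of this patch) of how a caller keeps the checksum valid after writing a word:

	static void ixgb_write_eeprom_word_checked(struct ixgb_hw *hw, u16 reg, u16 data)
	{
		/* write the word, then recompute the checksum word so that
		 * ixgb_validate_eeprom_checksum() still passes on the next load */
		ixgb_write_eeprom(hw, reg, data);
		ixgb_update_eeprom_checksum(hw);
	}

This mirrors what the ethtool set_eeprom path further down does: it writes the changed words and then calls ixgb_update_eeprom_checksum() whenever the checksum region was touched.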
...@@ -472,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw) ...@@ -472,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
ee_map = (struct ixgb_ee_map_type *)hw->eeprom; ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
DEBUGOUT("ixgb_ee: Reading eeprom data\n"); DEBUGOUT("ixgb_ee: Reading eeprom data\n");
for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
u16 ee_data; u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i); ee_data = ixgb_read_eeprom(hw, i);
checksum += ee_data; checksum += ee_data;
......
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -34,11 +34,11 @@ ...@@ -34,11 +34,11 @@
#define IXGB_ETH_LENGTH_OF_ADDRESS 6 #define IXGB_ETH_LENGTH_OF_ADDRESS 6
/* EEPROM Commands */ /* EEPROM Commands */
#define EEPROM_READ_OPCODE 0x6 /* EERPOM read opcode */ #define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
#define EEPROM_WRITE_OPCODE 0x5 /* EERPOM write opcode */ #define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
#define EEPROM_ERASE_OPCODE 0x7 /* EERPOM erase opcode */ #define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
#define EEPROM_EWEN_OPCODE 0x13 /* EERPOM erase/write enable */ #define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
#define EEPROM_EWDS_OPCODE 0x10 /* EERPOM erast/write disable */ #define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
/* EEPROM MAP (Word Offsets) */ /* EEPROM MAP (Word Offsets) */
#define EEPROM_IA_1_2_REG 0x0000 #define EEPROM_IA_1_2_REG 0x0000
......
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ...@@ -95,7 +95,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE; ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL; ecmd->transceiver = XCVR_EXTERNAL;
if(netif_carrier_ok(adapter->netdev)) { if (netif_carrier_ok(adapter->netdev)) {
ecmd->speed = SPEED_10000; ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL; ecmd->duplex = DUPLEX_FULL;
} else { } else {
...@@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ...@@ -122,11 +122,11 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{ {
struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_adapter *adapter = netdev_priv(netdev);
if(ecmd->autoneg == AUTONEG_ENABLE || if (ecmd->autoneg == AUTONEG_ENABLE ||
ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
return -EINVAL; return -EINVAL;
if(netif_running(adapter->netdev)) { if (netif_running(adapter->netdev)) {
ixgb_down(adapter, true); ixgb_down(adapter, true);
ixgb_reset(adapter); ixgb_reset(adapter);
ixgb_up(adapter); ixgb_up(adapter);
...@@ -146,11 +146,11 @@ ixgb_get_pauseparam(struct net_device *netdev, ...@@ -146,11 +146,11 @@ ixgb_get_pauseparam(struct net_device *netdev,
pause->autoneg = AUTONEG_DISABLE; pause->autoneg = AUTONEG_DISABLE;
if(hw->fc.type == ixgb_fc_rx_pause) if (hw->fc.type == ixgb_fc_rx_pause)
pause->rx_pause = 1; pause->rx_pause = 1;
else if(hw->fc.type == ixgb_fc_tx_pause) else if (hw->fc.type == ixgb_fc_tx_pause)
pause->tx_pause = 1; pause->tx_pause = 1;
else if(hw->fc.type == ixgb_fc_full) { else if (hw->fc.type == ixgb_fc_full) {
pause->rx_pause = 1; pause->rx_pause = 1;
pause->tx_pause = 1; pause->tx_pause = 1;
} }
...@@ -163,19 +163,19 @@ ixgb_set_pauseparam(struct net_device *netdev, ...@@ -163,19 +163,19 @@ ixgb_set_pauseparam(struct net_device *netdev,
struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw; struct ixgb_hw *hw = &adapter->hw;
if(pause->autoneg == AUTONEG_ENABLE) if (pause->autoneg == AUTONEG_ENABLE)
return -EINVAL; return -EINVAL;
if(pause->rx_pause && pause->tx_pause) if (pause->rx_pause && pause->tx_pause)
hw->fc.type = ixgb_fc_full; hw->fc.type = ixgb_fc_full;
else if(pause->rx_pause && !pause->tx_pause) else if (pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgb_fc_rx_pause; hw->fc.type = ixgb_fc_rx_pause;
else if(!pause->rx_pause && pause->tx_pause) else if (!pause->rx_pause && pause->tx_pause)
hw->fc.type = ixgb_fc_tx_pause; hw->fc.type = ixgb_fc_tx_pause;
else if(!pause->rx_pause && !pause->tx_pause) else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgb_fc_none; hw->fc.type = ixgb_fc_none;
if(netif_running(adapter->netdev)) { if (netif_running(adapter->netdev)) {
ixgb_down(adapter, true); ixgb_down(adapter, true);
ixgb_up(adapter); ixgb_up(adapter);
ixgb_set_speed_duplex(netdev); ixgb_set_speed_duplex(netdev);
...@@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data) ...@@ -200,7 +200,7 @@ ixgb_set_rx_csum(struct net_device *netdev, u32 data)
adapter->rx_csum = data; adapter->rx_csum = data;
if(netif_running(netdev)) { if (netif_running(netdev)) {
ixgb_down(adapter, true); ixgb_down(adapter, true);
ixgb_up(adapter); ixgb_up(adapter);
ixgb_set_speed_duplex(netdev); ixgb_set_speed_duplex(netdev);
...@@ -229,7 +229,7 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data) ...@@ -229,7 +229,7 @@ ixgb_set_tx_csum(struct net_device *netdev, u32 data)
static int static int
ixgb_set_tso(struct net_device *netdev, u32 data) ixgb_set_tso(struct net_device *netdev, u32 data)
{ {
if(data) if (data)
netdev->features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO;
else else
netdev->features &= ~NETIF_F_TSO; netdev->features &= ~NETIF_F_TSO;
...@@ -301,7 +301,7 @@ ixgb_get_regs(struct net_device *netdev, ...@@ -301,7 +301,7 @@ ixgb_get_regs(struct net_device *netdev,
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
/* there are 16 RAR entries in hardware, we only use 3 */ /* there are 16 RAR entries in hardware, we only use 3 */
for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
} }
...@@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev, ...@@ -415,7 +415,7 @@ ixgb_get_eeprom(struct net_device *netdev,
int i, max_len, first_word, last_word; int i, max_len, first_word, last_word;
int ret_val = 0; int ret_val = 0;
if(eeprom->len == 0) { if (eeprom->len == 0) {
ret_val = -EINVAL; ret_val = -EINVAL;
goto geeprom_error; goto geeprom_error;
} }
...@@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev, ...@@ -424,12 +424,12 @@ ixgb_get_eeprom(struct net_device *netdev,
max_len = ixgb_get_eeprom_len(netdev); max_len = ixgb_get_eeprom_len(netdev);
if(eeprom->offset > eeprom->offset + eeprom->len) { if (eeprom->offset > eeprom->offset + eeprom->len) {
ret_val = -EINVAL; ret_val = -EINVAL;
goto geeprom_error; goto geeprom_error;
} }
if((eeprom->offset + eeprom->len) > max_len) if ((eeprom->offset + eeprom->len) > max_len)
eeprom->len = (max_len - eeprom->offset); eeprom->len = (max_len - eeprom->offset);
first_word = eeprom->offset >> 1; first_word = eeprom->offset >> 1;
...@@ -437,16 +437,14 @@ ixgb_get_eeprom(struct net_device *netdev, ...@@ -437,16 +437,14 @@ ixgb_get_eeprom(struct net_device *netdev,
eeprom_buff = kmalloc(sizeof(__le16) * eeprom_buff = kmalloc(sizeof(__le16) *
(last_word - first_word + 1), GFP_KERNEL); (last_word - first_word + 1), GFP_KERNEL);
if(!eeprom_buff) if (!eeprom_buff)
return -ENOMEM; return -ENOMEM;
/* note the eeprom was good because the driver loaded */ /* note the eeprom was good because the driver loaded */
for(i = 0; i <= (last_word - first_word); i++) { for (i = 0; i <= (last_word - first_word); i++)
eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
eeprom->len);
kfree(eeprom_buff); kfree(eeprom_buff);
geeprom_error: geeprom_error:
...@@ -464,35 +462,35 @@ ixgb_set_eeprom(struct net_device *netdev, ...@@ -464,35 +462,35 @@ ixgb_set_eeprom(struct net_device *netdev,
int max_len, first_word, last_word; int max_len, first_word, last_word;
u16 i; u16 i;
if(eeprom->len == 0) if (eeprom->len == 0)
return -EINVAL; return -EINVAL;
if(eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT; return -EFAULT;
max_len = ixgb_get_eeprom_len(netdev); max_len = ixgb_get_eeprom_len(netdev);
if(eeprom->offset > eeprom->offset + eeprom->len) if (eeprom->offset > eeprom->offset + eeprom->len)
return -EINVAL; return -EINVAL;
if((eeprom->offset + eeprom->len) > max_len) if ((eeprom->offset + eeprom->len) > max_len)
eeprom->len = (max_len - eeprom->offset); eeprom->len = (max_len - eeprom->offset);
first_word = eeprom->offset >> 1; first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL); eeprom_buff = kmalloc(max_len, GFP_KERNEL);
if(!eeprom_buff) if (!eeprom_buff)
return -ENOMEM; return -ENOMEM;
ptr = (void *)eeprom_buff; ptr = (void *)eeprom_buff;
if(eeprom->offset & 1) { if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */ /* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */ /* only the second byte of the word is being modified */
eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
ptr++; ptr++;
} }
if((eeprom->offset + eeprom->len) & 1) { if ((eeprom->offset + eeprom->len) & 1) {
/* need read/modify/write of last changed EEPROM word */ /* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */ /* only the first byte of the word is being modified */
eeprom_buff[last_word - first_word] eeprom_buff[last_word - first_word]
...@@ -500,11 +498,11 @@ ixgb_set_eeprom(struct net_device *netdev, ...@@ -500,11 +498,11 @@ ixgb_set_eeprom(struct net_device *netdev,
} }
memcpy(ptr, bytes, eeprom->len); memcpy(ptr, bytes, eeprom->len);
for(i = 0; i <= (last_word - first_word); i++) for (i = 0; i <= (last_word - first_word); i++)
ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
/* Update the checksum over the first part of the EEPROM if needed */ /* Update the checksum over the first part of the EEPROM if needed */
if(first_word <= EEPROM_CHECKSUM_REG) if (first_word <= EEPROM_CHECKSUM_REG)
ixgb_update_eeprom_checksum(hw); ixgb_update_eeprom_checksum(hw);
kfree(eeprom_buff); kfree(eeprom_buff);
...@@ -557,10 +555,10 @@ ixgb_set_ringparam(struct net_device *netdev, ...@@ -557,10 +555,10 @@ ixgb_set_ringparam(struct net_device *netdev,
tx_old = adapter->tx_ring; tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring; rx_old = adapter->rx_ring;
if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL; return -EINVAL;
if(netif_running(adapter->netdev)) if (netif_running(adapter->netdev))
ixgb_down(adapter, true); ixgb_down(adapter, true);
rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
...@@ -571,11 +569,11 @@ ixgb_set_ringparam(struct net_device *netdev, ...@@ -571,11 +569,11 @@ ixgb_set_ringparam(struct net_device *netdev,
txdr->count = min(txdr->count,(u32)MAX_TXD); txdr->count = min(txdr->count,(u32)MAX_TXD);
txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
if(netif_running(adapter->netdev)) { if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */ /* Try to get new resources before deleting old */
if((err = ixgb_setup_rx_resources(adapter))) if ((err = ixgb_setup_rx_resources(adapter)))
goto err_setup_rx; goto err_setup_rx;
if((err = ixgb_setup_tx_resources(adapter))) if ((err = ixgb_setup_tx_resources(adapter)))
goto err_setup_tx; goto err_setup_tx;
/* save the new, restore the old in order to free it, /* save the new, restore the old in order to free it,
...@@ -589,7 +587,7 @@ ixgb_set_ringparam(struct net_device *netdev, ...@@ -589,7 +587,7 @@ ixgb_set_ringparam(struct net_device *netdev,
ixgb_free_tx_resources(adapter); ixgb_free_tx_resources(adapter);
adapter->rx_ring = rx_new; adapter->rx_ring = rx_new;
adapter->tx_ring = tx_new; adapter->tx_ring = tx_new;
if((err = ixgb_up(adapter))) if ((err = ixgb_up(adapter)))
return err; return err;
ixgb_set_speed_duplex(netdev); ixgb_set_speed_duplex(netdev);
} }
...@@ -615,7 +613,7 @@ ixgb_led_blink_callback(unsigned long data) ...@@ -615,7 +613,7 @@ ixgb_led_blink_callback(unsigned long data)
{ {
struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status)) if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
ixgb_led_off(&adapter->hw); ixgb_led_off(&adapter->hw);
else else
ixgb_led_on(&adapter->hw); ixgb_led_on(&adapter->hw);
...@@ -631,7 +629,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data) ...@@ -631,7 +629,7 @@ ixgb_phys_id(struct net_device *netdev, u32 data)
if (!data) if (!data)
data = INT_MAX; data = INT_MAX;
if(!adapter->blink_timer.function) { if (!adapter->blink_timer.function) {
init_timer(&adapter->blink_timer); init_timer(&adapter->blink_timer);
adapter->blink_timer.function = ixgb_led_blink_callback; adapter->blink_timer.function = ixgb_led_blink_callback;
adapter->blink_timer.data = (unsigned long)adapter; adapter->blink_timer.data = (unsigned long)adapter;
...@@ -666,7 +664,7 @@ ixgb_get_ethtool_stats(struct net_device *netdev, ...@@ -666,7 +664,7 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
int i; int i;
ixgb_update_stats(adapter); ixgb_update_stats(adapter);
for(i = 0; i < IXGB_STATS_LEN; i++) { for (i = 0; i < IXGB_STATS_LEN; i++) {
char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
data[i] = (ixgb_gstrings_stats[i].sizeof_stat == data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p; sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
...@@ -680,7 +678,7 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ...@@ -680,7 +678,7 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch(stringset) { switch(stringset) {
case ETH_SS_STATS: case ETH_SS_STATS:
for(i=0; i < IXGB_STATS_LEN; i++) { for (i = 0; i < IXGB_STATS_LEN; i++) {
memcpy(data + i * ETH_GSTRING_LEN, memcpy(data + i * ETH_GSTRING_LEN,
ixgb_gstrings_stats[i].stat_string, ixgb_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN); ETH_GSTRING_LEN);
......
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw) ...@@ -125,7 +125,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
/* If we are stopped or resetting exit gracefully and wait to be /* If we are stopped or resetting exit gracefully and wait to be
* started again before accessing the hardware. * started again before accessing the hardware.
*/ */
if(hw->adapter_stopped) { if (hw->adapter_stopped) {
DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
return false; return false;
} }
...@@ -347,7 +347,7 @@ ixgb_init_hw(struct ixgb_hw *hw) ...@@ -347,7 +347,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
/* Zero out the Multicast HASH table */ /* Zero out the Multicast HASH table */
DEBUGOUT("Zeroing the MTA\n"); DEBUGOUT("Zeroing the MTA\n");
for(i = 0; i < IXGB_MC_TBL_SIZE; i++) for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* Zero out the VLAN Filter Table Array */ /* Zero out the VLAN Filter Table Array */
...@@ -371,7 +371,7 @@ ixgb_init_hw(struct ixgb_hw *hw) ...@@ -371,7 +371,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
* hw - Struct containing variables accessed by shared code * hw - Struct containing variables accessed by shared code
* *
* Places the MAC address in receive address register 0 and clears the rest * Places the MAC address in receive address register 0 and clears the rest
* of the receive addresss registers. Clears the multicast table. Assumes * of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called. * the receiver is in reset when the routine is called.
*****************************************************************************/ *****************************************************************************/
static void static void
...@@ -413,7 +413,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw) ...@@ -413,7 +413,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
/* Zero out the other 15 receive addresses. */ /* Zero out the other 15 receive addresses. */
DEBUGOUT("Clearing RAR[1-15]\n"); DEBUGOUT("Clearing RAR[1-15]\n");
for(i = 1; i < IXGB_RAR_ENTRIES; i++) { for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
/* Write high reg first to disable the AV bit first */ /* Write high reg first to disable the AV bit first */
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
...@@ -452,19 +452,18 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw, ...@@ -452,19 +452,18 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
/* Clear RAR[1-15] */ /* Clear RAR[1-15] */
DEBUGOUT(" Clearing RAR[1-15]\n"); DEBUGOUT(" Clearing RAR[1-15]\n");
for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
} }
/* Clear the MTA */ /* Clear the MTA */
DEBUGOUT(" Clearing MTA\n"); DEBUGOUT(" Clearing MTA\n");
for(i = 0; i < IXGB_MC_TBL_SIZE; i++) { for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
}
/* Add the new addresses */ /* Add the new addresses */
for(i = 0; i < mc_addr_count; i++) { for (i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n"); DEBUGOUT(" Adding the multicast addresses:\n");
DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)], mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
...@@ -482,7 +481,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw, ...@@ -482,7 +481,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
/* Place this multicast address in the RAR if there is room, * /* Place this multicast address in the RAR if there is room, *
* else put it in the MTA * else put it in the MTA
*/ */
if(rar_used_count < IXGB_RAR_ENTRIES) { if (rar_used_count < IXGB_RAR_ENTRIES) {
ixgb_rar_set(hw, ixgb_rar_set(hw,
mc_addr_list + mc_addr_list +
(i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)), (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
...@@ -649,7 +648,7 @@ ixgb_clear_vfta(struct ixgb_hw *hw) ...@@ -649,7 +648,7 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
{ {
u32 offset; u32 offset;
for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
return; return;
} }
...@@ -719,9 +718,8 @@ ixgb_setup_fc(struct ixgb_hw *hw) ...@@ -719,9 +718,8 @@ ixgb_setup_fc(struct ixgb_hw *hw)
/* Write the new settings */ /* Write the new settings */
IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
if (pap_reg != 0) { if (pap_reg != 0)
IXGB_WRITE_REG(hw, PAP, pap_reg); IXGB_WRITE_REG(hw, PAP, pap_reg);
}
/* Set the flow control receive threshold registers. Normally, /* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be * these registers will be set to a default threshold that may be
...@@ -729,14 +727,14 @@ ixgb_setup_fc(struct ixgb_hw *hw) ...@@ -729,14 +727,14 @@ ixgb_setup_fc(struct ixgb_hw *hw)
* ability to transmit pause frames in not enabled, then these * ability to transmit pause frames in not enabled, then these
* registers will be set to 0. * registers will be set to 0.
*/ */
if(!(hw->fc.type & ixgb_fc_tx_pause)) { if (!(hw->fc.type & ixgb_fc_tx_pause)) {
IXGB_WRITE_REG(hw, FCRTL, 0); IXGB_WRITE_REG(hw, FCRTL, 0);
IXGB_WRITE_REG(hw, FCRTH, 0); IXGB_WRITE_REG(hw, FCRTH, 0);
} else { } else {
/* We need to set up the Receive Threshold high and low water /* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of XON * marks as well as (optionally) enabling the transmission of XON
* frames. */ * frames. */
if(hw->fc.send_xon) { if (hw->fc.send_xon) {
IXGB_WRITE_REG(hw, FCRTL, IXGB_WRITE_REG(hw, FCRTL,
(hw->fc.low_water | IXGB_FCRTL_XONE)); (hw->fc.low_water | IXGB_FCRTL_XONE));
} else { } else {
...@@ -791,7 +789,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw, ...@@ -791,7 +789,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
** from the CPU Write to the Ready bit assertion. ** from the CPU Write to the Ready bit assertion.
**************************************************************/ **************************************************************/
for(i = 0; i < 10; i++) for (i = 0; i < 10; i++)
{ {
udelay(10); udelay(10);
...@@ -818,7 +816,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw, ...@@ -818,7 +816,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
** from the CPU Write to the Ready bit assertion. ** from the CPU Write to the Ready bit assertion.
**************************************************************/ **************************************************************/
for(i = 0; i < 10; i++) for (i = 0; i < 10; i++)
{ {
udelay(10); udelay(10);
...@@ -887,7 +885,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw, ...@@ -887,7 +885,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
** from the CPU Write to the Ready bit assertion. ** from the CPU Write to the Ready bit assertion.
**************************************************************/ **************************************************************/
for(i = 0; i < 10; i++) for (i = 0; i < 10; i++)
{ {
udelay(10); udelay(10);
...@@ -914,7 +912,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw, ...@@ -914,7 +912,7 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
** from the CPU Write to the Ready bit assertion. ** from the CPU Write to the Ready bit assertion.
**************************************************************/ **************************************************************/
for(i = 0; i < 10; i++) for (i = 0; i < 10; i++)
{ {
udelay(10); udelay(10);
...@@ -965,7 +963,7 @@ ixgb_check_for_link(struct ixgb_hw *hw) ...@@ -965,7 +963,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
} }
/****************************************************************************** /******************************************************************************
* Check for a bad link condition that may have occured. * Check for a bad link condition that may have occurred.
* The indication is that the RFC / LFC registers may be incrementing * The indication is that the RFC / LFC registers may be incrementing
* continually. A full adapter reset is required to recover. * continually. A full adapter reset is required to recover.
* *
...@@ -1007,7 +1005,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw) ...@@ -1007,7 +1005,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
DEBUGFUNC("ixgb_clear_hw_cntrs"); DEBUGFUNC("ixgb_clear_hw_cntrs");
/* if we are stopped or resetting exit gracefully */ /* if we are stopped or resetting exit gracefully */
if(hw->adapter_stopped) { if (hw->adapter_stopped) {
DEBUGOUT("Exiting because the adapter is stopped!!!\n"); DEBUGOUT("Exiting because the adapter is stopped!!!\n");
return; return;
} }
......
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
......
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
......
This diff is collapsed.
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#undef ASSERT #undef ASSERT
#define ASSERT(x) if(!(x)) BUG() #define ASSERT(x) if (!(x)) BUG()
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) #define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
#ifdef DBG #ifdef DBG
......
/******************************************************************************* /*******************************************************************************
Intel PRO/10GbE Linux driver Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2006 Intel Corporation. Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -200,7 +200,7 @@ struct ixgb_option { ...@@ -200,7 +200,7 @@ struct ixgb_option {
static int __devinit static int __devinit
ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
{ {
if(*value == OPTION_UNSET) { if (*value == OPTION_UNSET) {
*value = opt->def; *value = opt->def;
return 0; return 0;
} }
...@@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) ...@@ -217,7 +217,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
} }
break; break;
case range_option: case range_option:
if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
printk(KERN_INFO "%s set to %i\n", opt->name, *value); printk(KERN_INFO "%s set to %i\n", opt->name, *value);
return 0; return 0;
} }
...@@ -226,10 +226,10 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) ...@@ -226,10 +226,10 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
int i; int i;
struct ixgb_opt_list *ent; struct ixgb_opt_list *ent;
for(i = 0; i < opt->arg.l.nr; i++) { for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i]; ent = &opt->arg.l.p[i];
if(*value == ent->i) { if (*value == ent->i) {
if(ent->str[0] != '\0') if (ent->str[0] != '\0')
printk(KERN_INFO "%s\n", ent->str); printk(KERN_INFO "%s\n", ent->str);
return 0; return 0;
} }
...@@ -260,7 +260,7 @@ void __devinit ...@@ -260,7 +260,7 @@ void __devinit
ixgb_check_options(struct ixgb_adapter *adapter) ixgb_check_options(struct ixgb_adapter *adapter)
{ {
int bd = adapter->bd_number; int bd = adapter->bd_number;
if(bd >= IXGB_MAX_NIC) { if (bd >= IXGB_MAX_NIC) {
printk(KERN_NOTICE printk(KERN_NOTICE
"Warning: no configuration for board #%i\n", bd); "Warning: no configuration for board #%i\n", bd);
printk(KERN_NOTICE "Using defaults for all values\n"); printk(KERN_NOTICE "Using defaults for all values\n");
...@@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -277,7 +277,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}; };
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
if(num_TxDescriptors > bd) { if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd]; tx_ring->count = TxDescriptors[bd];
ixgb_validate_option(&tx_ring->count, &opt); ixgb_validate_option(&tx_ring->count, &opt);
} else { } else {
...@@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -296,7 +296,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
}; };
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
if(num_RxDescriptors > bd) { if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd]; rx_ring->count = RxDescriptors[bd];
ixgb_validate_option(&rx_ring->count, &opt); ixgb_validate_option(&rx_ring->count, &opt);
} else { } else {
...@@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -312,7 +312,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.def = OPTION_ENABLED .def = OPTION_ENABLED
}; };
if(num_XsumRX > bd) { if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd]; unsigned int rx_csum = XsumRX[bd];
ixgb_validate_option(&rx_csum, &opt); ixgb_validate_option(&rx_csum, &opt);
adapter->rx_csum = rx_csum; adapter->rx_csum = rx_csum;
...@@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -338,7 +338,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.p = fc_list }} .p = fc_list }}
}; };
if(num_FlowControl > bd) { if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd]; unsigned int fc = FlowControl[bd];
ixgb_validate_option(&fc, &opt); ixgb_validate_option(&fc, &opt);
adapter->hw.fc.type = fc; adapter->hw.fc.type = fc;
...@@ -356,14 +356,14 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -356,14 +356,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_FCRTH}} .max = MAX_FCRTH}}
}; };
if(num_RxFCHighThresh > bd) { if (num_RxFCHighThresh > bd) {
adapter->hw.fc.high_water = RxFCHighThresh[bd]; adapter->hw.fc.high_water = RxFCHighThresh[bd];
ixgb_validate_option(&adapter->hw.fc.high_water, &opt); ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
} else { } else {
adapter->hw.fc.high_water = opt.def; adapter->hw.fc.high_water = opt.def;
} }
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
printk (KERN_INFO printk(KERN_INFO
"Ignoring RxFCHighThresh when no RxFC\n"); "Ignoring RxFCHighThresh when no RxFC\n");
} }
{ /* Receive Flow Control Low Threshold */ { /* Receive Flow Control Low Threshold */
...@@ -376,14 +376,14 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -376,14 +376,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_FCRTL}} .max = MAX_FCRTL}}
}; };
if(num_RxFCLowThresh > bd) { if (num_RxFCLowThresh > bd) {
adapter->hw.fc.low_water = RxFCLowThresh[bd]; adapter->hw.fc.low_water = RxFCLowThresh[bd];
ixgb_validate_option(&adapter->hw.fc.low_water, &opt); ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
} else { } else {
adapter->hw.fc.low_water = opt.def; adapter->hw.fc.low_water = opt.def;
} }
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
printk (KERN_INFO printk(KERN_INFO
"Ignoring RxFCLowThresh when no RxFC\n"); "Ignoring RxFCLowThresh when no RxFC\n");
} }
{ /* Flow Control Pause Time Request*/ { /* Flow Control Pause Time Request*/
...@@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -396,7 +396,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_FCPAUSE}} .max = MAX_FCPAUSE}}
}; };
if(num_FCReqTimeout > bd) { if (num_FCReqTimeout > bd) {
unsigned int pause_time = FCReqTimeout[bd]; unsigned int pause_time = FCReqTimeout[bd];
ixgb_validate_option(&pause_time, &opt); ixgb_validate_option(&pause_time, &opt);
adapter->hw.fc.pause_time = pause_time; adapter->hw.fc.pause_time = pause_time;
...@@ -404,7 +404,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -404,7 +404,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.pause_time = opt.def; adapter->hw.fc.pause_time = opt.def;
} }
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
printk (KERN_INFO printk(KERN_INFO
"Ignoring FCReqTimeout when no RxFC\n"); "Ignoring FCReqTimeout when no RxFC\n");
} }
/* high low and spacing check for rx flow control thresholds */ /* high low and spacing check for rx flow control thresholds */
...@@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -412,7 +412,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
/* high must be greater than low */ /* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */ /* set defaults */
printk (KERN_INFO printk(KERN_INFO
"RxFCHighThresh must be >= (RxFCLowThresh + 8), " "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
"Using Defaults\n"); "Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH; adapter->hw.fc.high_water = DEFAULT_FCRTH;
...@@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -429,7 +429,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_RDTR}} .max = MAX_RDTR}}
}; };
if(num_RxIntDelay > bd) { if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd]; adapter->rx_int_delay = RxIntDelay[bd];
ixgb_validate_option(&adapter->rx_int_delay, &opt); ixgb_validate_option(&adapter->rx_int_delay, &opt);
} else { } else {
...@@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -446,7 +446,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.max = MAX_TIDV}} .max = MAX_TIDV}}
}; };
if(num_TxIntDelay > bd) { if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd]; adapter->tx_int_delay = TxIntDelay[bd];
ixgb_validate_option(&adapter->tx_int_delay, &opt); ixgb_validate_option(&adapter->tx_int_delay, &opt);
} else { } else {
...@@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter) ...@@ -462,7 +462,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
.def = OPTION_ENABLED .def = OPTION_ENABLED
}; };
if(num_IntDelayEnable > bd) { if (num_IntDelayEnable > bd) {
unsigned int ide = IntDelayEnable[bd]; unsigned int ide = IntDelayEnable[bd];
ixgb_validate_option(&ide, &opt); ixgb_validate_option(&ide, &opt);
adapter->tx_int_delay_enable = ide; adapter->tx_int_delay_enable = ide;
......
...@@ -177,6 +177,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) ...@@ -177,6 +177,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
return bus; return bus;
} }
EXPORT_SYMBOL(alloc_mdio_bitbang);
void free_mdio_bitbang(struct mii_bus *bus) void free_mdio_bitbang(struct mii_bus *bus)
{ {
...@@ -185,5 +186,6 @@ void free_mdio_bitbang(struct mii_bus *bus) ...@@ -185,5 +186,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
module_put(ctrl->ops->owner); module_put(ctrl->ops->owner);
kfree(bus); kfree(bus);
} }
EXPORT_SYMBOL(free_mdio_bitbang);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
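The two EXPORT_SYMBOL additions above let bit-banged MDIO controllers built as separate modules use these helpers. A rough sketch of such a caller, assuming the mdiobb_ops callback names from <linux/mdio-bitbang.h>; the my_* names and the pin handling are hypothetical:

	#include <linux/mdio-bitbang.h>
	#include <linux/phy.h>

	struct my_mdio {
		struct mdiobb_ctrl ctrl;	/* handed to alloc_mdio_bitbang() */
		/* board-specific pin state would live here */
	};

	static void my_set_mdc(struct mdiobb_ctrl *ctrl, int level) { /* drive MDC */ }
	static void my_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output) { /* set MDIO direction */ }
	static void my_set_mdio_data(struct mdiobb_ctrl *ctrl, int value) { /* drive MDIO */ }
	static int my_get_mdio_data(struct mdiobb_ctrl *ctrl) { return 0; /* sample MDIO */ }

	static struct mdiobb_ops my_mdio_ops = {
		.owner		= THIS_MODULE,
		.set_mdc	= my_set_mdc,
		.set_mdio_dir	= my_set_mdio_dir,
		.set_mdio_data	= my_set_mdio_data,
		.get_mdio_data	= my_get_mdio_data,
	};

	static struct mii_bus *my_bus;

	static int my_mdio_probe(struct my_mdio *priv)
	{
		priv->ctrl.ops = &my_mdio_ops;
		my_bus = alloc_mdio_bitbang(&priv->ctrl);	/* now callable from a module */
		if (!my_bus)
			return -ENOMEM;
		/* fill in my_bus->name, id and parent before registering */
		return mdiobus_register(my_bus);
	}

	static void my_mdio_remove(void)
	{
		mdiobus_unregister(my_bus);
		free_mdio_bitbang(my_bus);		/* likewise exported now */
	}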
...@@ -86,7 +86,7 @@ ...@@ -86,7 +86,7 @@
#include "s2io.h" #include "s2io.h"
#include "s2io-regs.h" #include "s2io-regs.h"
#define DRV_VERSION "2.0.26.24" #define DRV_VERSION "2.0.26.25"
/* S2io Driver name & version. */ /* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion"; static char s2io_driver_name[] = "Neterion";
...@@ -1891,8 +1891,6 @@ static int init_nic(struct s2io_nic *nic) ...@@ -1891,8 +1891,6 @@ static int init_nic(struct s2io_nic *nic)
static int s2io_link_fault_indication(struct s2io_nic *nic) static int s2io_link_fault_indication(struct s2io_nic *nic)
{ {
if (nic->config.intr_type != INTA)
return MAC_RMAC_ERR_TIMER;
if (nic->device_type == XFRAME_II_DEVICE) if (nic->device_type == XFRAME_II_DEVICE)
return LINK_UP_DOWN_INTERRUPT; return LINK_UP_DOWN_INTERRUPT;
else else
...@@ -1925,7 +1923,9 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) ...@@ -1925,7 +1923,9 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{ {
struct XENA_dev_config __iomem *bar0 = nic->bar0; struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 gen_int_mask = 0; register u64 gen_int_mask = 0;
u64 interruptible;
writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
if (mask & TX_DMA_INTR) { if (mask & TX_DMA_INTR) {
gen_int_mask |= TXDMA_INT_M; gen_int_mask |= TXDMA_INT_M;
...@@ -2015,10 +2015,12 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) ...@@ -2015,10 +2015,12 @@ static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
gen_int_mask |= RXMAC_INT_M; gen_int_mask |= RXMAC_INT_M;
do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
&bar0->mac_int_mask); &bar0->mac_int_mask);
do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
RMAC_DOUBLE_ECC_ERR | RMAC_DOUBLE_ECC_ERR;
RMAC_LINK_STATE_CHANGE_INT, if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
interruptible |= RMAC_LINK_STATE_CHANGE_INT;
do_s2io_write_bits(interruptible,
flag, &bar0->mac_rmac_err_mask); flag, &bar0->mac_rmac_err_mask);
} }
...@@ -2501,6 +2503,9 @@ static void stop_nic(struct s2io_nic *nic) ...@@ -2501,6 +2503,9 @@ static void stop_nic(struct s2io_nic *nic)
/** /**
* fill_rx_buffers - Allocates the Rx side skbs * fill_rx_buffers - Allocates the Rx side skbs
* @ring_info: per ring structure * @ring_info: per ring structure
* @from_card_up: If this is true, we will map the buffer to get
* the dma address for buf0 and buf1 to give it to the card.
* Else we will sync the already mapped buffer to give it to the card.
* Description: * Description:
* The function allocates Rx side skbs and puts the physical * The function allocates Rx side skbs and puts the physical
* address of these buffers into the RxD buffer pointers, so that the NIC * address of these buffers into the RxD buffer pointers, so that the NIC
...@@ -2518,7 +2523,7 @@ static void stop_nic(struct s2io_nic *nic) ...@@ -2518,7 +2523,7 @@ static void stop_nic(struct s2io_nic *nic)
* SUCCESS on success or an appropriate -ve value on failure. * SUCCESS on success or an appropriate -ve value on failure.
*/ */
static int fill_rx_buffers(struct ring_info *ring) static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct RxD_t *rxdp; struct RxD_t *rxdp;
...@@ -2637,17 +2642,16 @@ static int fill_rx_buffers(struct ring_info *ring) ...@@ -2637,17 +2642,16 @@ static int fill_rx_buffers(struct ring_info *ring)
skb->data = (void *) (unsigned long)tmp; skb->data = (void *) (unsigned long)tmp;
skb_reset_tail_pointer(skb); skb_reset_tail_pointer(skb);
/* AK: check is wrong. 0 can be valid dma address */ if (from_card_up) {
if (!(rxdp3->Buffer0_ptr))
rxdp3->Buffer0_ptr = rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0, pci_map_single(ring->pdev, ba->ba_0,
BUF0_LEN, PCI_DMA_FROMDEVICE); BUF0_LEN, PCI_DMA_FROMDEVICE);
else if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
goto pci_map_failed;
} else
pci_dma_sync_single_for_device(ring->pdev, pci_dma_sync_single_for_device(ring->pdev,
(dma_addr_t) rxdp3->Buffer0_ptr, (dma_addr_t) rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE); BUF0_LEN, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
if (ring->rxd_mode == RXD_MODE_3B) { if (ring->rxd_mode == RXD_MODE_3B) {
...@@ -2664,14 +2668,14 @@ static int fill_rx_buffers(struct ring_info *ring) ...@@ -2664,14 +2668,14 @@ static int fill_rx_buffers(struct ring_info *ring)
if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
goto pci_map_failed; goto pci_map_failed;
/* AK: check is wrong */ if (from_card_up) {
if (!rxdp3->Buffer1_ptr)
rxdp3->Buffer1_ptr = rxdp3->Buffer1_ptr =
pci_map_single(ring->pdev, pci_map_single(ring->pdev,
ba->ba_1, BUF1_LEN, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { if (pci_dma_mapping_error
(rxdp3->Buffer1_ptr)) {
pci_unmap_single pci_unmap_single
(ring->pdev, (ring->pdev,
(dma_addr_t)(unsigned long) (dma_addr_t)(unsigned long)
...@@ -2680,6 +2684,7 @@ static int fill_rx_buffers(struct ring_info *ring) ...@@ -2680,6 +2684,7 @@ static int fill_rx_buffers(struct ring_info *ring)
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
goto pci_map_failed; goto pci_map_failed;
} }
}
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
(ring->mtu + 4); (ring->mtu + 4);
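The from_card_up logic above replaces the old "is the DMA address still zero?" test (see the removed AK comments: 0 can be a valid DMA address). A condensed sketch of the pattern with illustrative names, not the driver's own code:

	#include <linux/pci.h>

	static int refill_rx_buffer(struct pci_dev *pdev, void *buf, size_t len,
				    dma_addr_t *mapping, int from_card_up)
	{
		if (from_card_up) {
			/* first fill after open: create the mapping once */
			*mapping = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(*mapping))
				return -EIO;
		} else {
			/* later refills: the mapping already exists, just hand
			 * ownership of the buffer back to the device */
			pci_dma_sync_single_for_device(pdev, *mapping, len,
						       PCI_DMA_FROMDEVICE);
		}
		return 0;
	}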
...@@ -2813,7 +2818,7 @@ static void free_rx_buffers(struct s2io_nic *sp) ...@@ -2813,7 +2818,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
static int s2io_chk_rx_buffers(struct ring_info *ring) static int s2io_chk_rx_buffers(struct ring_info *ring)
{ {
if (fill_rx_buffers(ring) == -ENOMEM) { if (fill_rx_buffers(ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
} }
...@@ -2944,7 +2949,7 @@ static void s2io_netpoll(struct net_device *dev) ...@@ -2944,7 +2949,7 @@ static void s2io_netpoll(struct net_device *dev)
rx_intr_handler(&mac_control->rings[i], 0); rx_intr_handler(&mac_control->rings[i], 0);
for (i = 0; i < config->rx_ring_num; i++) { for (i = 0; i < config->rx_ring_num; i++) {
if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
break; break;
...@@ -4373,8 +4378,12 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) ...@@ -4373,8 +4378,12 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
/* Nothing much can be done. Get out */ /* Nothing much can be done. Get out */
return IRQ_HANDLED; return IRQ_HANDLED;
if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
if (reason & GEN_INTR_TXPIC)
s2io_txpic_intr_handle(sp);
if (reason & GEN_INTR_TXTRAFFIC) if (reason & GEN_INTR_TXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
...@@ -4383,8 +4392,10 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) ...@@ -4383,8 +4392,10 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
writeq(sp->general_int_mask, &bar0->general_int_mask); writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status); readl(&bar0->general_int_status);
return IRQ_HANDLED; return IRQ_HANDLED;
}
/* The interrupt was not raised by us */
return IRQ_NONE;
} }
static void s2io_txpic_intr_handle(struct s2io_nic *sp) static void s2io_txpic_intr_handle(struct s2io_nic *sp)
...@@ -7112,6 +7123,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) ...@@ -7112,6 +7123,9 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
s2io_rem_isr(sp); s2io_rem_isr(sp);
/* stop the tx queue, indicate link down */
s2io_link(sp, LINK_DOWN);
/* Check if the device is Quiescent and then Reset the NIC */ /* Check if the device is Quiescent and then Reset the NIC */
while(do_io) { while(do_io) {
/* As per the HW requirement we need to replenish the /* As per the HW requirement we need to replenish the
...@@ -7183,7 +7197,7 @@ static int s2io_card_up(struct s2io_nic * sp) ...@@ -7183,7 +7197,7 @@ static int s2io_card_up(struct s2io_nic * sp)
for (i = 0; i < config->rx_ring_num; i++) { for (i = 0; i < config->rx_ring_num; i++) {
mac_control->rings[i].mtu = dev->mtu; mac_control->rings[i].mtu = dev->mtu;
ret = fill_rx_buffers(&mac_control->rings[i]); ret = fill_rx_buffers(&mac_control->rings[i], 1);
if (ret) { if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name); dev->name);
...@@ -7244,17 +7258,19 @@ static int s2io_card_up(struct s2io_nic * sp) ...@@ -7244,17 +7258,19 @@ static int s2io_card_up(struct s2io_nic * sp)
S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
set_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Enable select interrupts */ /* Enable select interrupts */
en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
if (sp->config.intr_type != INTA) if (sp->config.intr_type != INTA) {
en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
else { en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
} else {
interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
interruptible |= TX_PIC_INTR; interruptible |= TX_PIC_INTR;
en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
} }
set_bit(__S2IO_STATE_CARD_UP, &sp->state);
return 0; return 0;
} }
......
...@@ -1107,6 +1107,7 @@ static int init_shared_mem(struct s2io_nic *sp); ...@@ -1107,6 +1107,7 @@ static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp); static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic); static int init_nic(struct s2io_nic *nic);
static int rx_intr_handler(struct ring_info *ring_data, int budget); static int rx_intr_handler(struct ring_info *ring_data, int budget);
static void s2io_txpic_intr_handle(struct s2io_nic *sp);
static void tx_intr_handler(struct fifo_info *fifo_data); static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id); static void s2io_handle_errors(void * dev_id);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -236,10 +236,8 @@ struct velocity_rd_info { ...@@ -236,10 +236,8 @@ struct velocity_rd_info {
struct velocity_td_info { struct velocity_td_info {
struct sk_buff *skb; struct sk_buff *skb;
u8 *buf;
int nskb_dma; int nskb_dma;
dma_addr_t skb_dma[7]; dma_addr_t skb_dma[7];
dma_addr_t buf_dma;
}; };
enum velocity_owner { enum velocity_owner {
...@@ -1506,9 +1504,6 @@ struct velocity_info { ...@@ -1506,9 +1504,6 @@ struct velocity_info {
dma_addr_t rd_pool_dma; dma_addr_t rd_pool_dma;
dma_addr_t td_pool_dma[TX_QUEUE_NO]; dma_addr_t td_pool_dma[TX_QUEUE_NO];
dma_addr_t tx_bufs_dma;
u8 *tx_bufs;
struct vlan_group *vlgrp; struct vlan_group *vlgrp;
u8 ip_addr[4]; u8 ip_addr[4];
enum chip_type chip_id; enum chip_type chip_id;
......
...@@ -550,7 +550,8 @@ static struct virtio_device_id id_table[] = { ...@@ -550,7 +550,8 @@ static struct virtio_device_id id_table[] = {
}; };
static unsigned int features[] = { static unsigned int features[] = {
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
}; };
......
...@@ -828,6 +828,19 @@ static inline void netif_napi_add(struct net_device *dev, ...@@ -828,6 +828,19 @@ static inline void netif_napi_add(struct net_device *dev,
set_bit(NAPI_STATE_SCHED, &napi->state); set_bit(NAPI_STATE_SCHED, &napi->state);
} }
/**
* netif_napi_del - remove a napi context
* @napi: napi context
*
* netif_napi_del() removes a napi context from the network device napi list
*/
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
list_del(&napi->dev_list);
#endif
}
struct packet_type { struct packet_type {
__be16 type; /* This is really htons(ether_type). */ __be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */ struct net_device *dev; /* NULL is wildcarded here */
......
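The new netif_napi_del() above gives drivers a counterpart to netif_napi_add(). A minimal sketch of the pairing in a driver's setup and teardown paths; my_priv, my_poll and the weight of 64 are made up for illustration:

	struct my_priv {
		struct napi_struct napi;
		/* rest of the per-device state */
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* process up to 'budget' packets; when work_done < budget the
		 * driver signals completion (netif_rx_complete() in kernels of
		 * this generation) and re-enables its interrupts */
		return work_done;
	}

	static void my_setup(struct net_device *netdev, struct my_priv *priv)
	{
		netif_napi_add(netdev, &priv->napi, my_poll, 64);
	}

	static void my_teardown(struct my_priv *priv)
	{
		/* drops the context from the device's napi list before the
		 * containing structure is freed */
		netif_napi_del(&priv->napi);
	}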