Commit ed492288 authored by Jeff Garzik

Merge mandrakesoft.com:/home/jgarzik/vanilla/linus-2.5

into mandrakesoft.com:/home/jgarzik/repo/net-drivers-2.5
parents d9f2d50e 77d28a4f
......@@ -65,9 +65,15 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
do { (tx_desc)->opts2 = 0; } while (0)
#endif
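For orientation, a minimal sketch of how the transmit path can use these macros; the vlan_tx_tag_present()/vlan_tx_tag_get() helpers and the cp->vlgrp field are assumptions drawn from the 2.5-era VLAN interface, not part of this hunk:

	/* in cp_start_xmit(), before filling the descriptor (sketch) */
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
#endif
	...
	CP_VLAN_TX_TAG(txd, vlan_tag);	/* stores the tag in opts2, or 0 when
					 * VLAN support is compiled out */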
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
......@@ -643,7 +649,7 @@ static void cp_tx (struct cp_private *cp)
cp->tx_tail = tx_tail;
if (netif_queue_stopped(cp->dev) && (TX_BUFFS_AVAIL(cp) > 1))
if (netif_queue_stopped(cp->dev) && (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)))
netif_wake_queue(cp->dev);
}
......@@ -658,9 +664,12 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&cp->lock);
/* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irq(&cp->lock);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
return 1;
}
......@@ -760,9 +769,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
if (netif_msg_tx_queued(cp))
printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
dev->name, entry, skb->len);
if (TX_BUFFS_AVAIL(cp) < 0)
BUG();
if (TX_BUFFS_AVAIL(cp) == 0)
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
spin_unlock_irq(&cp->lock);
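The two threshold changes above are a pair: a single skb can need up to skb_shinfo(skb)->nr_frags + 1 descriptors (one for the linear part, one per page fragment, bounded by MAX_SKB_FRAGS + 1), so the queue is now stopped while a worst-case packet might not fit and only woken once more than that many slots are free again. Condensed, the protocol looks like this (the conditions restate the hunks above; the comments are editorial):

	/* cp_start_xmit(): leave the queue stopped whenever the next packet,
	 * in the worst case, might not fit */
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	/* cp_tx(): after reclaiming completed descriptors, wake the queue
	 * only once a worst-case packet is guaranteed to fit */
	if (netif_queue_stopped(cp->dev) &&
	    (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)))
		netif_wake_queue(cp->dev);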
......@@ -773,6 +780,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
return 0;
}
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */
static void __cp_set_rx_mode (struct net_device *dev)
{
struct cp_private *cp = dev->priv;
......@@ -1072,6 +1082,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
dev->mtu = new_mtu;
cp_set_rxbufsize(cp); /* set new rx buf size */
return 0;
}
......@@ -1081,6 +1092,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
cp_stop_hw(cp); /* stop h/w and free rings */
cp_clean_rings(cp);
dev->mtu = new_mtu;
cp_set_rxbufsize(cp); /* set new rx buf size */
rc = cp_init_rings(cp); /* realloc and restart h/w */
......@@ -1226,7 +1238,7 @@ static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
}
#if CP_VLAN_TAG_USED
static int cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct cp_private *cp = dev->priv;
......@@ -1234,8 +1246,6 @@ static int cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
cp->vlgrp = grp;
cpw16(CpCmd, cpr16(CpCmd) | RxVlanOn);
spin_unlock_irq(&cp->lock);
return 0;
}
static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
......
......@@ -17,6 +17,7 @@
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
......
......@@ -128,12 +128,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define E100_DEFAULT_TCB MAX_TCB
#define E100_MIN_TCB 2*TX_FRAME_CNT + 3 /* make room for at least 2 interrupts */
#ifdef __ia64__
/* We can't use too many DMAble buffers on IA64 machines with >4 GB mem */
#define E100_MAX_TCB 64
#else
#define E100_MAX_TCB 1024
#endif /* __ia64__ */
#define E100_DEFAULT_RFD MAX_RFD
#define E100_MIN_RFD 8
......@@ -766,6 +761,8 @@ typedef enum _non_tx_cmd_state_t {
#define IPCB_INSERTVLAN_ENABLE BIT_1
#define IPCB_IP_ACTIVATION_DEFAULT IPCB_HARDWAREPARSING_ENABLE
#define FOLD_CSUM(_XSUM) ((((_XSUM << 16) | (_XSUM >> 16)) + _XSUM) >> 16)
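FOLD_CSUM collapses a 32-bit one's-complement checksum accumulator into 16 bits: rotating the word by 16 and adding it to itself sums the two halves and folds the end-around carry in the same step. A small standalone sketch of the identical arithmetic (editorial, not part of the driver):

	/* equivalent to FOLD_CSUM(_XSUM): fold high and low 16-bit halves,
	 * end-around carry included */
	static inline unsigned short fold_csum(unsigned int xsum)
	{
		return (unsigned short)
			((((xsum << 16) | (xsum >> 16)) + xsum) >> 16);
	}
	/* e.g. fold_csum(0x0001fffe) == 0xffff, fold_csum(0x0002fffe) == 0x0001 */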
/* Transmit Buffer Descriptor (TBD)*/
typedef struct _tbd_t {
u32 tbd_buf_addr; /* Physical Transmit Buffer Address */
......@@ -1008,6 +1005,11 @@ struct e100_private {
u32 wolopts;
u16 ip_lbytes;
#endif
#ifdef CONFIG_PM
u32 pci_state[16];
#endif
};
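The new pci_state[16] array gives the driver a place to preserve PCI configuration space across suspend/resume. A minimal sketch of how the CONFIG_PM handlers might use it, assuming the 2.5-era pci_save_state()/pci_restore_state() calls that take an explicit buffer; the function names are illustrative, not part of this header:

#ifdef CONFIG_PM
static int e100_suspend_sketch(struct pci_dev *pcid, u32 state)
{
	struct net_device *netdev = pci_get_drvdata(pcid);
	struct e100_private *bdp = netdev->priv;

	pci_save_state(pcid, bdp->pci_state);	/* stash config space */
	/* ... quiesce the adapter, arm wake-up if requested ... */
	return 0;
}

static int e100_resume_sketch(struct pci_dev *pcid)
{
	struct net_device *netdev = pci_get_drvdata(pcid);
	struct e100_private *bdp = netdev->priv;

	pci_restore_state(pcid, bdp->pci_state);	/* write it back */
	/* ... re-initialize the adapter ... */
	return 0;
}
#endif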
#define E100_AUTONEG 0
......@@ -1030,4 +1032,9 @@ extern unsigned char e100_selftest(struct e100_private *bdp, u32 *st_timeout,
extern unsigned char e100_get_link_state(struct e100_private *bdp);
extern unsigned char e100_wait_scb(struct e100_private *bdp);
extern void e100_deisolate_driver(struct e100_private *bdp,
u8 recover, u8 full_reset);
extern unsigned char e100_hw_reset_recover(struct e100_private *bdp,
u32 reset_cmd);
#endif
......@@ -69,6 +69,10 @@ ANY LOSS OF USE; DATA, OR PROFITS; OR BUSINESS INTERUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
Portions (C) 2002 Red Hat, Inc. under the terms of the GNU GPL v2.
*******************************************************************************/
/**********************************************************************
......@@ -152,7 +156,7 @@ eeprom_set_semaphore(struct e100_private *adapter)
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
schedule_timeout(1+(HZ-1)/100);
}
return false;
}
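Both eeprom wait loops get the same change (the second occurrence is further down): the old one-jiffy sleep depends on HZ, while 1 + (HZ - 1) / 100 is the integer ceiling of HZ/100, i.e. a sleep of at least roughly 10 ms on any tick rate. A quick check of the arithmetic (editorial):

	/* 1 + (HZ - 1) / 100  ==  ceil(HZ / 100) jiffies, so at least ~10 ms:
	 *   HZ =  100  ->  1 +   99 / 100 =  1 jiffy    = 10.0 ms
	 *   HZ =  250  ->  1 +  249 / 100 =  3 jiffies  = 12.0 ms
	 *   HZ = 1000  ->  1 +  999 / 100 = 10 jiffies  = 10.0 ms
	 *   HZ = 1024  ->  1 + 1023 / 100 = 11 jiffies  ~ 10.7 ms
	 */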
......@@ -252,19 +256,12 @@ e100_eeprom_size(struct e100_private *adapter)
// Returns: bits in an address for that size eeprom
//----------------------------------------------------------------------------------------
static u16
static inline int
eeprom_address_size(u16 size)
{
switch (size) {
case 64:
return 6;
case 128:
return 7;
case 256:
return 8;
}
int isize = size;
return 0; //fix compiler warning or error!
return ffs(isize);
}
//----------------------------------------------------------------------------------------
......@@ -348,6 +345,7 @@ shift_out_bits(struct e100_private *adapter, u16 data, u16 count)
x |= EEDI;
writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
readw(&(adapter->scb->scb_status)); /* flush command to card */
udelay(EEPROM_STALL_TIME);
raise_clock(adapter, &x);
lower_clock(adapter, &x);
......@@ -374,6 +372,7 @@ raise_clock(struct e100_private *adapter, u16 *x)
{
*x = *x | EESK;
writew(*x, &CSR_EEPROM_CONTROL_FIELD(adapter));
readw(&(adapter->scb->scb_status)); /* flush command to card */
udelay(EEPROM_STALL_TIME);
}
......@@ -393,6 +392,7 @@ lower_clock(struct e100_private *adapter, u16 *x)
{
*x = *x & ~EESK;
writew(*x, &CSR_EEPROM_CONTROL_FIELD(adapter));
readw(&(adapter->scb->scb_status)); /* flush command to card */
udelay(EEPROM_STALL_TIME);
}
......@@ -498,7 +498,7 @@ e100_eeprom_write_word(struct e100_private *adapter, u16 reg, u16 data)
x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
x &= ~(EEDI | EEDO | EESK);
writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
wmb();
readw(&(adapter->scb->scb_status)); /* flush command to card */
udelay(EEPROM_STALL_TIME);
x |= EECS;
writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
......@@ -587,7 +587,7 @@ eeprom_wait_cmd_done(struct e100_private *adapter)
return true;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
schedule_timeout(1+(HZ-1)/100);
}
return false;
......@@ -606,9 +606,10 @@ eeprom_stand_by(struct e100_private *adapter)
x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
x &= ~(EECS | EESK);
writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
wmb();
readw(&(adapter->scb->scb_status)); /* flush command to card */
udelay(EEPROM_STALL_TIME);
x |= EECS;
writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
readw(&(adapter->scb->scb_status)); /* flush command to card */
udelay(EEPROM_STALL_TIME);
}
......@@ -769,7 +769,7 @@ e100_set_fc(struct e100_private *bdp)
* Arguments: bdp - Pointer to the e100_private structure for the board
*
* Returns: true if link state was changed
* B_FLASE otherwise
* false otherwise
*
*/
unsigned char
......
......@@ -64,8 +64,8 @@ static int debug = -1; /* The debug level */
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE 32
#define RX_RING_SIZE 32
#define TX_RING_SIZE 64
#define RX_RING_SIZE 64
/* How many slots multicast filter setup may take.
Do not decrease without changing set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE 2
......@@ -570,6 +570,19 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
if (speedo_debug > 0 && did_version++ == 0)
printk(version);
/* save power state before pci_enable_device overwrites it */
pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm) {
u16 pwr_command;
pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
}
if (pci_enable_device(pdev))
goto err_out_free_mmio_region;
pci_set_master(pdev);
if (!request_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1), "eepro100")) {
printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
......@@ -600,18 +613,6 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
pci_resource_start(pdev, 0), irq);
#endif
/* save power state b4 pci_enable_device overwrites it */
pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm) {
u16 pwr_command;
pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
}
if (pci_enable_device(pdev))
goto err_out_free_mmio_region;
pci_set_master(pdev);
if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
cards_found++;
......@@ -1074,6 +1075,51 @@ static void speedo_resume(struct net_device *dev)
outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
* Sometimes the receiver stops making progress. This routine knows how to
* get it going again, without losing packets or being otherwise nasty like
* a chip reset would be. Previously the driver had a whole sequence
* of if RxSuspended, if it's no buffers do one thing, if it's no resources,
* do another, etc. But those things don't really matter. Separate logic
in the ISR provides for allocating buffers--the other half of the operation
is just making sure the receiver is active. speedo_rx_soft_reset does that.
The problem with the old, more involved algorithm showed up under
ping floods on the order of 60K packets/second on a 100Mbps fdx network.
*/
static void
speedo_rx_soft_reset(struct net_device *dev)
{
struct speedo_private *sp = dev->priv;
struct RxFD *rfd;
long ioaddr;
ioaddr = dev->base_addr;
wait_for_cmd_done(ioaddr + SCBCmd);
if (inb(ioaddr + SCBCmd) != 0) {
printk("%s: previous command stalled\n", dev->name);
return;
}
/*
* Put the hardware into a known state.
*/
outb(RxAbort, ioaddr + SCBCmd);
rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
rfd->rx_buf_addr = 0xffffffff;
wait_for_cmd_done(ioaddr + SCBCmd);
if (inb(ioaddr + SCBCmd) != 0) {
printk("%s: RxAbort command stalled\n", dev->name);
return;
}
outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
outb(RxStart, ioaddr + SCBCmd);
}
/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
......@@ -1377,9 +1423,10 @@ speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* workaround for hardware bug on 10 mbit half duplex */
if ((sp->partner == 0) || (sp->chip_id == 1)) {
if ((sp->partner == 0) && (sp->chip_id == 1)) {
wait_for_cmd_done(ioaddr + SCBCmd);
outb(0 , ioaddr + SCBCmd);
udelay(1);
}
/* Trigger the command unit resume. */
......@@ -1507,82 +1554,39 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
if ((status & 0xfc00) == 0)
break;
/* Always check if all rx buffers are allocated. --SAW */
speedo_refill_rx_buffers(dev, 0);
if ((status & 0x5000) || /* Packet received, or Rx error. */
(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
/* Need to gather the postponed packet. */
speedo_rx(dev);
if (status & 0x1000) {
spin_lock(&sp->lock);
if ((status & 0x003c) == 0x0028) { /* No more Rx buffers. */
struct RxFD *rxf;
printk(KERN_WARNING "%s: card reports no RX buffers.\n",
dev->name);
rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
if (rxf == NULL) {
if (speedo_debug > 2)
printk(KERN_DEBUG
"%s: NULL cur_rx in speedo_interrupt().\n",
dev->name);
sp->rx_ring_state |= RrNoMem|RrNoResources;
} else if (rxf == sp->last_rxf) {
if (speedo_debug > 2)
printk(KERN_DEBUG
"%s: cur_rx is last in speedo_interrupt().\n",
dev->name);
sp->rx_ring_state |= RrNoMem|RrNoResources;
} else
outb(RxResumeNoResources, ioaddr + SCBCmd);
} else if ((status & 0x003c) == 0x0008) { /* No resources. */
struct RxFD *rxf;
printk(KERN_WARNING "%s: card reports no resources.\n",
dev->name);
rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
if (rxf == NULL) {
if (speedo_debug > 2)
printk(KERN_DEBUG
"%s: NULL cur_rx in speedo_interrupt().\n",
dev->name);
sp->rx_ring_state |= RrNoMem|RrNoResources;
} else if (rxf == sp->last_rxf) {
if (speedo_debug > 2)
printk(KERN_DEBUG
"%s: cur_rx is last in speedo_interrupt().\n",
dev->name);
sp->rx_ring_state |= RrNoMem|RrNoResources;
} else {
/* Restart the receiver. */
outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
outb(RxStart, ioaddr + SCBCmd);
}
}
sp->stats.rx_errors++;
spin_unlock(&sp->lock);
}
/* Always check if all rx buffers are allocated. --SAW */
speedo_refill_rx_buffers(dev, 0);
if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {
printk(KERN_WARNING
"%s: restart the receiver after a possible hang.\n",
dev->name);
spin_lock(&sp->lock);
/* Restart the receiver.
I'm not sure if it's always right to restart the receiver
here but I don't know another way to prevent receiver hangs.
1999/12/25 SAW */
outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
outb(RxStart, ioaddr + SCBCmd);
sp->rx_ring_state &= ~RrNoResources;
spin_unlock(&sp->lock);
/*
* The chip may have suspended reception for various reasons.
* Check for that, and re-prime it should this be the case.
*/
switch ((status >> 2) & 0xf) {
case 0: /* Idle */
break;
case 1: /* Suspended */
case 2: /* No resources (RxFDs) */
case 9: /* Suspended with no more RBDs */
case 10: /* No resources due to no RBDs */
case 12: /* Ready with no RBDs */
speedo_rx_soft_reset(dev);
break;
case 3: case 5: case 6: case 7: case 8:
case 11: case 13: case 14: case 15:
/* these are all reserved values */
break;
}
/* User interrupt, Command/Tx unit interrupt or CU not active. */
if (status & 0xA400) {
spin_lock(&sp->lock);
speedo_tx_buffer_gc(dev);
if (sp->tx_full
&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
......@@ -1590,9 +1594,10 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
sp->tx_full = 0;
netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
}
spin_unlock(&sp->lock);
}
spin_unlock(&sp->lock);
if (--boguscnt < 0) {
printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
dev->name, status);
......@@ -2203,6 +2208,8 @@ static int eepro100_suspend(struct pci_dev *pdev, u32 state)
if (!netif_running(dev))
return 0;
del_timer_sync(&sp->timer);
netif_device_detach(dev);
outl(PortPartialReset, ioaddr + SCBPort);
......@@ -2234,6 +2241,8 @@ static int eepro100_resume(struct pci_dev *pdev)
sp->rx_mode = -1;
sp->flow_ctrl = sp->partner = 0;
set_rx_mode(dev);
sp->timer.expires = RUN_AT(2*HZ);
add_timer(&sp->timer);
return 0;
}
#endif /* CONFIG_PM */
......
/* $Id: tg3.h,v 1.37.2.30 2002/03/05 10:08:39 davem Exp $
/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
* tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
*
* Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
......@@ -713,13 +713,17 @@
#define DEFAULT_RXCOL_TICKS 0x00000048
#define HIGH_RXCOL_TICKS 0x00000096
#define HOSTCC_TXCOL_TICKS 0x00003c0c
#define LOW_TXCOL_TICKS 0x00000096
#define DEFAULT_TXCOL_TICKS 0x0000012c
#define HIGH_TXCOL_TICKS 0x00000145
#define HOSTCC_RXMAX_FRAMES 0x00003c10
#define LOW_RXMAX_FRAMES 0x00000005
#define DEFAULT_RXMAX_FRAMES 0x00000008
#define HIGH_RXMAX_FRAMES 0x00000012
#define HOSTCC_TXMAX_FRAMES 0x00003c14
#define LOW_TXMAX_FRAMES 0x00000035
#define DEFAULT_TXMAX_FRAMES 0x0000004b
#define HIGH_TXMAX_FRAMES 0x00000052
#define HOSTCC_RXCOAL_TICK_INT 0x00003c18
#define DEFAULT_RXCOAL_TICK_INT 0x00000019
#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
......@@ -1681,17 +1685,42 @@ struct tg3_link_config {
};
struct tg3_coalesce_config {
/* Current settings. */
u32 rx_coalesce_ticks;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_ticks_during_int;
u32 rx_max_coalesced_frames_during_int;
u32 tx_coalesce_ticks;
u32 tx_max_coalesced_frames;
u32 tx_coalesce_ticks_during_int;
u32 tx_max_coalesced_frames_during_int;
u32 stats_coalesce_ticks;
/* Default settings. */
u32 rx_coalesce_ticks_def;
u32 rx_max_coalesced_frames_def;
u32 rx_coalesce_ticks_during_int_def;
u32 rx_max_coalesced_frames_during_int_def;
u32 tx_coalesce_ticks_def;
u32 tx_max_coalesced_frames_def;
u32 tx_coalesce_ticks_during_int_def;
u32 tx_max_coalesced_frames_during_int_def;
u32 stats_coalesce_ticks_def;
/* Adaptive RX/TX coalescing parameters. */
u32 rate_sample_jiffies;
u32 pkt_rate_low;
u32 pkt_rate_high;
u32 rx_coalesce_ticks_low;
u32 rx_max_coalesced_frames_low;
u32 tx_coalesce_ticks_low;
u32 tx_max_coalesced_frames_low;
u32 rx_coalesce_ticks_high;
u32 rx_max_coalesced_frames_high;
u32 tx_coalesce_ticks_high;
u32 tx_max_coalesced_frames_high;
};
struct tg3_bufmgr_config {
......@@ -1720,6 +1749,7 @@ struct tg3 {
spinlock_t indirect_lock;
struct net_device_stats net_stats;
struct net_device_stats net_stats_prev;
unsigned long phy_crc_errors;
/* Adaptive coalescing engine. */
......@@ -1731,9 +1761,11 @@ struct tg3 {
u32 tg3_flags;
#define TG3_FLAG_HOST_TXDS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
#define TG3_FLAG_BROKEN_CHECKSUMS 0x00000004
#define TG3_FLAG_RX_CHECKSUMS 0x00000004
#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
#define TG3_FLAG_USE_MI_INTERRUPT 0x00000010
#define TG3_FLAG_ADAPTIVE_RX 0x00000020
#define TG3_FLAG_ADAPTIVE_TX 0x00000040
#define TG3_FLAG_PHY_RESET_ON_INIT 0x00000100
#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
#define TG3_FLAG_TAGGED_IRQ_STATUS 0x00000400
......@@ -1751,6 +1783,10 @@ struct tg3 {
#define TG3_FLAG_AUTONEG_DISABLE 0x00400000
#define TG3_FLAG_JUMBO_ENABLE 0x00800000
#define TG3_FLAG_10_100_ONLY 0x01000000
#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
#define TG3_FLAG_PAUSE_RX 0x04000000
#define TG3_FLAG_PAUSE_TX 0x08000000
#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
#define TG3_FLAG_INIT_COMPLETE 0x80000000
u32 msg_enable;
......@@ -1764,6 +1800,13 @@ struct tg3 {
struct tg3_coalesce_config coalesce_config;
struct tg3_bufmgr_config bufmgr_config;
u32 rx_pending;
#if TG3_MINI_RING_WORKS
u32 rx_mini_pending;
#endif
u32 rx_jumbo_pending;
u32 tx_pending;
/* cache h/w values, often passed straight to h/w */
u32 rx_mode;
u32 tx_mode;
......
......@@ -72,6 +72,144 @@ struct ethtool_eeprom {
u32 len; /* in bytes */
u8 data[0];
};
/* for configuring coalescing parameters of chip */
struct ethtool_coalesce {
u32 cmd; /* ETHTOOL_{G,S}COALESCE */
/* How many usecs to delay an RX interrupt after
* a packet arrives. If 0, only rx_max_coalesced_frames
* is used.
*/
u32 rx_coalesce_usecs;
/* How many packets to delay an RX interrupt after
* a packet arrives. If 0, only rx_coalesce_usecs is
* used. It is illegal to set both usecs and max frames
* to zero as this would cause RX interrupts to never be
* generated.
*/
u32 rx_max_coalesced_frames;
/* Same as above two parameters, except that these values
* apply while an IRQ is being serviced by the host. Not
* all cards support this feature and the values are ignored
* in that case.
*/
u32 rx_coalesce_usecs_irq;
u32 rx_max_coalesced_frames_irq;
/* How many usecs to delay a TX interrupt after
* a packet is sent. If 0, only tx_max_coalesced_frames
* is used.
*/
u32 tx_coalesce_usecs;
/* How many packets to delay a TX interrupt after
* a packet is sent. If 0, only tx_coalesce_usecs is
* used. It is illegal to set both usecs and max frames
* to zero as this would cause TX interrupts to never be
* generated.
*/
u32 tx_max_coalesced_frames;
/* Same as above two parameters, except that these values
* apply while an IRQ is being serviced by the host. Not
* all cards support this feature and the values are ignored
* in that case.
*/
u32 tx_coalesce_usecs_irq;
u32 tx_max_coalesced_frames_irq;
/* How many usecs to delay in-memory statistics
* block updates. Some drivers do not have an in-memory
* statistics block, and in such cases this value is ignored.
* This value must not be zero.
*/
u32 stats_block_coalesce_usecs;
/* Adaptive RX/TX coalescing is an algorithm implemented by
* some drivers to improve latency under low packet rates and
* improve throughput under high packet rates. Some drivers
* only implement one of RX or TX adaptive coalescing. Anything
* not implemented by the driver causes these values to be
* silently ignored.
*/
u32 use_adaptive_rx_coalesce;
u32 use_adaptive_tx_coalesce;
/* When the packet rate (measured in packets per second)
* is below pkt_rate_low, the {rx,tx}_*_low parameters are
* used.
*/
u32 pkt_rate_low;
u32 rx_coalesce_usecs_low;
u32 rx_max_coalesced_frames_low;
u32 tx_coalesce_usecs_low;
u32 tx_max_coalesced_frames_low;
/* When the packet rate is below pkt_rate_high but above
* pkt_rate_low (both measured in packets per second) the
* normal {rx,tx}_* coalescing parameters are used.
*/
/* When the packet rate (measured in packets per second)
* is above pkt_rate_high, the {rx,tx}_*_high parameters are
* used.
*/
u32 pkt_rate_high;
u32 rx_coalesce_usecs_high;
u32 rx_max_coalesced_frames_high;
u32 tx_coalesce_usecs_high;
u32 tx_max_coalesced_frames_high;
/* How often to do adaptive coalescing packet rate sampling,
* measured in seconds. Must not be zero.
*/
u32 rate_sample_interval;
};
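A sketch of the driver side of the new ETHTOOL_GCOALESCE command (defined further down in this file): the handler copies its current settings, for example out of a tg3_coalesce_config like the one added in tg3.h above, into a struct ethtool_coalesce and returns it to the caller. The surrounding ioctl plumbing, the useraddr pointer and the cc pointer are illustrative assumptions:

	case ETHTOOL_GCOALESCE: {
		struct ethtool_coalesce ecoal;
		struct tg3_coalesce_config *cc = &tp->coalesce_config;	/* illustrative */

		memset(&ecoal, 0, sizeof(ecoal));
		ecoal.cmd = ETHTOOL_GCOALESCE;
		ecoal.rx_coalesce_usecs          = cc->rx_coalesce_ticks;
		ecoal.rx_max_coalesced_frames    = cc->rx_max_coalesced_frames;
		ecoal.tx_coalesce_usecs          = cc->tx_coalesce_ticks;
		ecoal.tx_max_coalesced_frames    = cc->tx_max_coalesced_frames;
		ecoal.stats_block_coalesce_usecs = cc->stats_coalesce_ticks;
		ecoal.use_adaptive_rx_coalesce   =
			(tp->tg3_flags & TG3_FLAG_ADAPTIVE_RX) != 0;

		if (copy_to_user(useraddr, &ecoal, sizeof(ecoal)))
			return -EFAULT;
		return 0;
	}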
/* for configuring RX/TX ring parameters */
struct ethtool_ringparam {
u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
/* Read only attributes. These indicate the maximum number
* of pending RX/TX ring entries the driver will allow the
* user to set.
*/
u32 rx_max_pending;
u32 rx_mini_max_pending;
u32 rx_jumbo_max_pending;
u32 tx_max_pending;
/* Values changeable by the user. The valid values are
* in the range 1 to the "*_max_pending" counterpart above.
*/
u32 rx_pending;
u32 rx_mini_pending;
u32 rx_jumbo_pending;
u32 tx_pending;
};
/* for configuring link flow control parameters */
struct ethtool_pauseparam {
u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
* being true) the user may set 'autoneg' here non-zero to have the
* pause parameters be auto-negotiated too. In such a case, the
* {rx,tx}_pause values below determine what capabilities are
* advertised.
*
* If 'autoneg' is zero or the link is not being auto-negotiated,
* then {rx,tx}_pause force the driver to use/not-use pause
* flow control.
*/
u32 autoneg;
u32 rx_pause;
u32 tx_pause;
};
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* Get settings. */
#define ETHTOOL_SSET 0x00000002 /* Set settings, privileged. */
......@@ -82,9 +220,23 @@ struct ethtool_eeprom {
#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv. */
#define ETHTOOL_GLINK 0x0000000a /* Get link status */
#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */
#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config */
#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters */
#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters */
#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
* (ethtool_value) */
#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
* (ethtool_value) */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
......
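From user space the new commands are reached through the existing SIOCETHTOOL ioctl: the cmd word is filled in, ifr_data points at the command structure, and the driver fills in the rest. A self-contained sketch that reads the coalescing settings of eth0, assuming a kernel and driver that implement ETHTOOL_GCOALESCE; error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
	struct ethtool_coalesce ecoal;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "eth0");

	memset(&ecoal, 0, sizeof(ecoal));
	ecoal.cmd = ETHTOOL_GCOALESCE;
	ifr.ifr_data = (char *) &ecoal;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx: %u usecs / %u frames, tx: %u usecs / %u frames\n",
		       ecoal.rx_coalesce_usecs, ecoal.rx_max_coalesced_frames,
		       ecoal.tx_coalesce_usecs, ecoal.tx_max_coalesced_frames);
	return 0;
}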