Commit 6382e49f authored by Edward Peng, committed by Jeff Garzik

dl2k gige net driver updates:

* fix carrier and rx frame error miscounts
* fix vlan bug
* h/w dma bug workaround
* ISR chip-based timer scheme (see the sketch below)
parent 4e130235
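For context on the last item, here is a minimal sketch of the CountDown-based scheme this patch moves to, condensed from the start_xmit() and rio_interrupt() hunks below. Instead of requesting a TxComplete interrupt per packet, the transmit path arms the chip's CountDown timer so an IntRequested interrupt fires shortly afterwards; the ISR then reaps finished Tx descriptors via the new rio_free_tx() and re-arms a short countdown while work is still pending. Register and bit names (CountDown, DMACtrl, TxDMAComplete, IntRequested) are the driver's own from dl2k.h; the sketch function names and the two helpers are hypothetical stand-ins, not code from the patch.

/* Minimal sketch of the CountDown/IntRequested scheme -- not the patch itself.
 * Assumes the usual dl2k.c includes plus "dl2k.h" for register offsets/bits. */
extern void queue_tx_descriptor (struct net_device *dev, struct sk_buff *skb); /* hypothetical helper */
extern int tx_ring_not_empty (struct net_device *dev);                         /* hypothetical helper */

static int start_xmit_sketch (struct sk_buff *skb, struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	queue_tx_descriptor (dev, skb);		/* fill one TFD, as in start_xmit() */
	/* TxDMAPollNow: ask the DMA engine to fetch the new descriptor */
	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
	/* Schedule the ISR: the chip raises IntRequested when this counter
	 * expires, so Tx completions are reaped there rather than per packet. */
	writel (10000, ioaddr + CountDown);
	return 0;
}

static void tx_reap_sketch (struct net_device *dev, long ioaddr, int int_status)
{
	if (int_status & (TxDMAComplete | IntRequested))
		rio_free_tx (dev, 1);		/* free used tx skbuffs, as in rio_interrupt() */
	/* Re-arm a short countdown while descriptors are still outstanding */
	if (tx_ring_not_empty (dev))
		writel (100, ioaddr + CountDown);
}

The trade-off is fewer Tx interrupts at gigabit rates while the countdown still guarantees completed skbuffs are freed promptly; separately, the hunks below set TxDMAIndicate on every descriptor at 10 Mbps to work around the chip's failure to fetch the next descriptor pointer at that speed.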
dl2k.c
@@ -29,24 +29,28 @@
1.08 2002/01/17 Fixed the multicast bug. 1.08 2002/01/17 Fixed the multicast bug.
1.09 2002/03/07 Move rx-poll-now to re-fill loop. 1.09 2002/03/07 Move rx-poll-now to re-fill loop.
Added rio_timer() to watch rx buffers. Added rio_timer() to watch rx buffers.
1.10 2002/04/16 Fixed miscount of carrier error.
1.11 2002/05/23 Added ISR schedule scheme.
Fixed miscount of rx frame error for DGE-550SX.
Fixed VLAN bug.
*/ */
#include "dl2k.h" #include "dl2k.h"
static char version[] __devinitdata = static char version[] __devinitdata =
KERN_INFO "D-Link DL2000-based linux driver v1.09 2002/03/07\n"; KERN_INFO "D-Link DL2000-based linux driver v1.11 2002/05/23\n";
#define MAX_UNITS 8 #define MAX_UNITS 8
static int mtu[MAX_UNITS]; static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS]; static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS]; static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS]; static char *media[MAX_UNITS];
static int tx_flow[MAX_UNITS]; static int tx_flow=-1;
static int rx_flow[MAX_UNITS]; static int rx_flow=-1;
static int copy_thresh; static int copy_thresh;
static int rx_coalesce; /* Rx frame count each interrupt */ static int rx_coalesce=10; /* Rx frame count each interrupt */
static int rx_timeout; /* Rx DMA wait time in 64ns increments */ static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
static int tx_coalesce = DEFAULT_TXC; /* HW xmit count each TxComplete [1-8] */ static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
MODULE_AUTHOR ("Edward Peng"); MODULE_AUTHOR ("Edward Peng");
@@ -56,16 +60,16 @@ MODULE_PARM (mtu, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (media, "1-" __MODULE_STRING (MAX_UNITS) "s"); MODULE_PARM (media, "1-" __MODULE_STRING (MAX_UNITS) "s");
MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i"); MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (jumbo, "1-" __MODULE_STRING (MAX_UNITS) "i"); MODULE_PARM (jumbo, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (tx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i"); MODULE_PARM (tx_flow, "i");
MODULE_PARM (rx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i"); MODULE_PARM (rx_flow, "i");
MODULE_PARM (copy_thresh, "i"); MODULE_PARM (copy_thresh, "i");
MODULE_PARM (rx_coalesce, "i"); /* Rx frame count each interrupt */ MODULE_PARM (rx_coalesce, "i"); /* Rx frame count each interrupt */
MODULE_PARM (rx_timeout, "i"); /* Rx DMA wait time in 64ns increments */ MODULE_PARM (rx_timeout, "i"); /* Rx DMA wait time in 64ns increments */
MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxComplete [1-8] */ MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxDMAComplete */
/* Enable the default interrupts */ /* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxComplete| \ #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
UpdateStats | LinkEvent) UpdateStats | LinkEvent)
#define EnableInt() \ #define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable) writew(DEFAULT_INTR, ioaddr + IntEnable)
@@ -75,16 +79,18 @@ static int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev); static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data); static void rio_timer (unsigned long data);
static void tx_timeout (struct net_device *dev); static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev); static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev); static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static void rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs); static void rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status); static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev); static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status); static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu); static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev); static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev); static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev); static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev); static int find_miiphy (struct net_device *dev);
@@ -98,9 +104,6 @@ static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num); static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num, static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
u16 data); u16 data);
#ifdef RIO_DEBUG
static int rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata);
#endif
static int __devinit static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -109,7 +112,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
struct netdev_private *np; struct netdev_private *np;
static int card_idx; static int card_idx;
int chip_idx = ent->driver_data; int chip_idx = ent->driver_data;
int err, irq = pdev->irq; int err, irq;
long ioaddr; long ioaddr;
static int version_printed; static int version_printed;
void *ring_space; void *ring_space;
@@ -122,6 +125,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) if (err)
return err; return err;
irq = pdev->irq;
err = pci_request_regions (pdev, "dl2k"); err = pci_request_regions (pdev, "dl2k");
if (err) if (err)
goto err_out_disable; goto err_out_disable;
@@ -149,7 +153,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = dev->priv; np = dev->priv;
np->chip_id = chip_idx; np->chip_id = chip_idx;
np->pdev = pdev; np->pdev = pdev;
spin_lock_init (&np->lock); spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock); spin_lock_init (&np->rx_lock);
/* Parse manual configuration */ /* Parse manual configuration */
@@ -199,17 +203,18 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
} }
np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
vlan[card_idx] : 0; vlan[card_idx] : 0;
if (rx_coalesce != 0 && rx_timeout != 0) { if (rx_coalesce > 0 && rx_timeout > 0) {
np->rx_coalesce = rx_coalesce; np->rx_coalesce = rx_coalesce;
np->rx_timeout = rx_timeout; np->rx_timeout = rx_timeout;
np->coalesce = 1; np->coalesce = 1;
} }
np->tx_flow = (tx_flow[card_idx]) ? 1 : 0; np->tx_flow = (tx_flow == 0) ? 0 : 1;
np->rx_flow = (rx_flow[card_idx]) ? 1 : 0; np->rx_flow = (rx_flow == 0) ? 0 : 1;
if (tx_coalesce < 1) if (tx_coalesce < 1)
tx_coalesce = 1; tx_coalesce = 1;
if (tx_coalesce > 8) if (tx_coalesce > TX_RING_SIZE-1)
tx_coalesce = 8; tx_coalesce = TX_RING_SIZE - 1;
} }
dev->open = &rio_open; dev->open = &rio_open;
dev->hard_start_xmit = &start_xmit; dev->hard_start_xmit = &start_xmit;
@@ -217,7 +222,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->get_stats = &get_stats; dev->get_stats = &get_stats;
dev->set_multicast_list = &set_multicast; dev->set_multicast_list = &set_multicast;
dev->do_ioctl = &rio_ioctl; dev->do_ioctl = &rio_ioctl;
dev->tx_timeout = &tx_timeout; dev->tx_timeout = &rio_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT; dev->watchdog_timeo = TX_TIMEOUT;
dev->change_mtu = &change_mtu; dev->change_mtu = &change_mtu;
#if 0 #if 0
@@ -247,6 +252,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
/* Fiber device? */ /* Fiber device? */
np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
/* Set media and reset PHY */ /* Set media and reset PHY */
if (np->phy_media) { if (np->phy_media) {
/* default 1000mbps_fd for fiber deivices */ /* default 1000mbps_fd for fiber deivices */
@@ -281,6 +287,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->name, np->name, dev->name, np->name,
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq); dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
if (tx_coalesce > 1)
printk(KERN_INFO "tx_coalesce:\t%d packets\n",
tx_coalesce);
if (np->coalesce)
printk(KERN_INFO "rx_coalesce:\t%d packets\n"
KERN_INFO "rx_timeout: \t%d ns\n",
np->rx_coalesce, np->rx_timeout*640);
if (np->vlan)
printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
return 0; return 0;
err_out_unmap_rx: err_out_unmap_rx:
@@ -327,7 +342,7 @@ find_miiphy (struct net_device *dev)
return 0; return 0;
} }
static int __devinit int
parse_eeprom (struct net_device *dev) parse_eeprom (struct net_device *dev)
{ {
int i, j; int i, j;
@@ -346,7 +361,7 @@ parse_eeprom (struct net_device *dev)
} }
/* Check CRC */ /* Check CRC */
crc = ~ether_crc_le(256 - 4, sromdata); crc = ~ether_crc_le(256 - 4, sromdata);
if (psrom->crc != crc) { if (psrom->crc != crc) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name); printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
return -1; return -1;
@@ -431,8 +446,10 @@ rio_open (struct net_device *dev)
writeb (0xff, ioaddr + TxDMAPollPeriod); writeb (0xff, ioaddr + TxDMAPollPeriod);
writeb (0x30, ioaddr + RxDMABurstThresh); writeb (0x30, ioaddr + RxDMABurstThresh);
writeb (0x30, ioaddr + RxDMAUrgentThresh); writeb (0x30, ioaddr + RxDMAUrgentThresh);
netif_start_queue (dev);
writel (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl); /* clear statistics */
clear_stats (dev);
/* VLAN supported */ /* VLAN supported */
if (np->vlan) { if (np->vlan) {
/* priority field in RxDMAIntCtrl */ /* priority field in RxDMAIntCtrl */
@@ -451,15 +468,20 @@ rio_open (struct net_device *dev)
/* Enable default interrupts */ /* Enable default interrupts */
EnableInt (); EnableInt ();
/* clear statistics */
get_stats (dev);
init_timer (&np->timer); init_timer (&np->timer);
np->timer.expires = jiffies + 1*HZ; np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long) dev; np->timer.data = (unsigned long) dev;
np->timer.function = &rio_timer; np->timer.function = &rio_timer;
add_timer (&np->timer); add_timer (&np->timer);
/* Start Tx/Rx */
writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
ioaddr + MACCtrl);
netif_start_queue (dev);
return 0; return 0;
} }
static void static void
rio_timer (unsigned long data) rio_timer (unsigned long data)
{ {
@@ -469,10 +491,10 @@ rio_timer (unsigned long data)
int next_tick = 1*HZ; int next_tick = 1*HZ;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&np->rx_lock, flags);
/* Recover rx ring exhausted error */ /* Recover rx ring exhausted error */
if (np->cur_rx - np->old_rx >= RX_RING_SIZE) { if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
printk(KERN_INFO "Try to recover rx ring exhausted...\n"); printk(KERN_INFO "Try to recover rx ring exhausted...\n");
spin_lock_irqsave(&np->rx_lock, flags);
/* Re-allocate skbuffs to fill the descriptor ring */ /* Re-allocate skbuffs to fill the descriptor ring */
for (; np->cur_rx - np->old_rx > 0; np->old_rx++) { for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
struct sk_buff *skb; struct sk_buff *skb;
@@ -500,43 +522,22 @@ rio_timer (unsigned long data)
cpu_to_le64 (np->rx_buf_sz) << 48; cpu_to_le64 (np->rx_buf_sz) << 48;
np->rx_ring[entry].status = 0; np->rx_ring[entry].status = 0;
} /* end for */ } /* end for */
spin_unlock_irqrestore (&np->rx_lock, flags);
} /* end if */ } /* end if */
spin_unlock_irqrestore (&np->rx_lock, flags);
np->timer.expires = jiffies + next_tick; np->timer.expires = jiffies + next_tick;
add_timer(&np->timer); add_timer(&np->timer);
} }
static void static void
tx_timeout (struct net_device *dev) rio_tx_timeout (struct net_device *dev)
{ {
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr; long ioaddr = dev->base_addr;
printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
dev->name, readl (ioaddr + TxStatus)); dev->name, readl (ioaddr + TxStatus));
/* Free used tx skbuffs */ rio_free_tx(dev, 0);
for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (skb);
np->tx_skbuff[entry] = 0;
}
dev->if_port = 0; dev->if_port = 0;
dev->trans_start = jiffies; dev->trans_start = jiffies;
np->stats.tx_errors++;
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
np->tx_full = 0;
netif_wake_queue (dev);
}
} }
/* allocate and initialize Tx and Rx descriptors */ /* allocate and initialize Tx and Rx descriptors */
@@ -546,7 +547,6 @@ alloc_list (struct net_device *dev)
struct netdev_private *np = dev->priv; struct netdev_private *np = dev->priv;
int i; int i;
np->tx_full = 0;
np->cur_rx = np->cur_tx = 0; np->cur_rx = np->cur_tx = 0;
np->old_rx = np->old_tx = 0; np->old_rx = np->old_tx = 0;
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
@@ -605,18 +605,17 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
struct netdev_desc *txdesc; struct netdev_desc *txdesc;
unsigned entry; unsigned entry;
u32 ioaddr; u32 ioaddr;
int tx_shift; u64 tfc_vlan_tag = 0;
unsigned long flags;
if (np->link_status == 0) { /* Link Down */
dev_kfree_skb(skb);
return 0;
}
ioaddr = dev->base_addr; ioaddr = dev->base_addr;
entry = np->cur_tx % TX_RING_SIZE; entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb; np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry]; txdesc = &np->tx_ring[entry];
/* Set TFDDone to avoid TxDMA gather this descriptor */
txdesc->status = cpu_to_le64 (TFDDone);
txdesc->status |=
cpu_to_le64 (entry | WordAlignDisable | (1 << FragCountShift));
#if 0 #if 0
if (skb->ip_summed == CHECKSUM_HW) { if (skb->ip_summed == CHECKSUM_HW) {
txdesc->status |= txdesc->status |=
@@ -630,31 +629,33 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
(cpu_to_le64 (np->vlan) << 32) | (cpu_to_le64 (np->vlan) << 32) |
(cpu_to_le64 (skb->priority) << 45); (cpu_to_le64 (skb->priority) << 45);
} }
/* Send one packet each time at 10Mbps mode */
/* Tx coalescing loop do not exceed 8 */
if (entry % tx_coalesce == 0 || np->speed == 10)
txdesc->status |= cpu_to_le64 (TxIndicate);
txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
skb->len, skb->len,
PCI_DMA_TODEVICE)); PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48; txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
/* Clear TFDDone, then TxDMA start to send this descriptor */ /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
txdesc->status &= ~cpu_to_le64 (TFDDone); * Work around: Always use 1 descriptor in 10Mbps mode */
if (entry % tx_coalesce == 0 || np->speed == 10)
DEBUG_TFD_DUMP (np); txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
WordAlignDisable |
TxDMAIndicate |
(1 << FragCountShift));
else
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
WordAlignDisable |
(1 << FragCountShift));
/* TxDMAPollNow */ /* TxDMAPollNow */
writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
np->cur_tx++; /* Schedule ISR */
if (np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1 && np->speed != 10) { writel(10000, ioaddr + CountDown);
np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 && np->speed != 10) {
/* do nothing */ /* do nothing */
} else { } else if (!netif_queue_stopped(dev)) {
spin_lock_irqsave(&np->lock, flags);
np->tx_full = 1;
netif_stop_queue (dev); netif_stop_queue (dev);
spin_unlock_irqrestore (&np->lock, flags);
} }
/* The first TFDListPtr */ /* The first TFDListPtr */
@@ -664,14 +665,6 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
writel (0, dev->base_addr + TFDListPtr1); writel (0, dev->base_addr + TFDListPtr1);
} }
if (np->old_tx > TX_RING_SIZE) {
spin_lock_irqsave (&np->lock, flags);
tx_shift = TX_RING_SIZE;
np->old_tx -= tx_shift;
np->cur_tx -= tx_shift;
spin_unlock_irqrestore (&np->lock, flags);
}
/* NETDEV WATCHDOG timer */ /* NETDEV WATCHDOG timer */
dev->trans_start = jiffies; dev->trans_start = jiffies;
return 0; return 0;
@@ -688,61 +681,79 @@ rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
ioaddr = dev->base_addr; ioaddr = dev->base_addr;
np = dev->priv; np = dev->priv;
spin_lock(&np->lock);
while (1) { while (1) {
int_status = readw (ioaddr + IntStatus); int_status = readw (ioaddr + IntStatus);
writew (int_status, ioaddr + IntStatus); writew (int_status, ioaddr + IntStatus);
int_status &= DEFAULT_INTR; int_status &= DEFAULT_INTR;
if (int_status == 0) if (int_status == 0 || --cnt < 0)
break; break;
/* Processing received packets */ /* Processing received packets */
if (int_status & RxDMAComplete) if (int_status & RxDMAComplete)
receive_packet (dev); receive_packet (dev);
/* TxComplete interrupt */ /* TxDMAComplete interrupt */
if ((int_status & TxComplete) || np->tx_full) { if ((int_status & (TxDMAComplete|IntRequested))) {
int tx_status; int tx_status;
tx_status = readl (ioaddr + TxStatus); tx_status = readl (ioaddr + TxStatus);
if (tx_status & 0x01) if (tx_status & 0x01)
tx_error (dev, tx_status); tx_error (dev, tx_status);
/* Free used tx skbuffs */ /* Free used tx skbuffs */
for (;np->cur_tx - np->old_tx > 0; np->old_tx++) { rio_free_tx (dev, 1);
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (skb);
np->tx_skbuff[entry] = 0;
}
}
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
if (np->speed != 10 || int_status & TxComplete) {
np->tx_full = 0;
netif_wake_queue (dev);
}
} }
/* Handle uncommon events */ /* Handle uncommon events */
if (int_status & if (int_status &
(IntRequested | HostError | LinkEvent | UpdateStats)) (HostError | LinkEvent | UpdateStats))
rio_error (dev, int_status); rio_error (dev, int_status);
/* If too much interrupts here, disable all interrupts except }
IntRequest. When CountDown down to 0, IntRequest will if (np->cur_tx != np->old_tx)
be caught by rio_error() to recovery the interrupts */ writel (100, ioaddr + CountDown);
if (--cnt < 0) { }
get_stats (dev);
writel (1, ioaddr + CountDown); static void
writew (IntRequested, ioaddr + IntEnable); rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = (struct netdev_private *) dev->priv;
int entry = np->old_tx % TX_RING_SIZE;
int tx_use = 0;
long flag = 0;
if (irq)
spin_lock_irqsave(&np->tx_lock, flag);
else
spin_lock(&np->tx_lock);
/* Free used tx skbuffs */
while (entry != np->cur_tx) {
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break; break;
} skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
else
dev_kfree_skb (skb);
np->tx_skbuff[entry] = 0;
entry = (entry + 1) % TX_RING_SIZE;
tx_use++;
}
if (irq)
spin_unlock_irqrestore(&np->tx_lock, flag);
else
spin_unlock(&np->tx_lock);
np->old_tx = entry;
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (netif_queue_stopped(dev) &&
((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 || np->speed == 10)) {
netif_wake_queue (dev);
} }
spin_unlock(&np->lock);
} }
static void static void
@@ -755,11 +766,10 @@ tx_error (struct net_device *dev, int tx_status)
np = dev->priv; np = dev->priv;
frame_id = (tx_status & 0xffff0000) >> 16; frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id); dev->name, tx_status, frame_id);
np->stats.tx_errors++; np->stats.tx_errors++;
np->stats.tx_dropped++;
/* Ttransmit Underrun */ /* Ttransmit Underrun */
if (tx_status & 0x10) { if (tx_status & 0x10) {
np->stats.tx_fifo_errors++; np->stats.tx_fifo_errors++;
@@ -774,20 +784,7 @@ tx_error (struct net_device *dev, int tx_status)
break; break;
mdelay (1); mdelay (1);
} }
/* Free completed descriptors */ rio_free_tx (dev, 1);
for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev, np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (skb);
np->tx_skbuff[entry] = 0;
}
/* Reset TFDListPtr */ /* Reset TFDListPtr */
writel (np->tx_ring_dma + writel (np->tx_ring_dma +
np->old_tx * sizeof (struct netdev_desc), np->old_tx * sizeof (struct netdev_desc),
@@ -810,14 +807,13 @@ tx_error (struct net_device *dev, int tx_status)
/* Let TxStartThresh stay default value */ /* Let TxStartThresh stay default value */
} }
/* Maximum Collisions */ /* Maximum Collisions */
#ifdef ETHER_STATS #ifdef ETHER_STATS
if (tx_status & 0x08) if (tx_status & 0x08)
np->stats.collisions16++; np->stats.collisions16++;
#else #else
if (tx_status & 0x08) if (tx_status & 0x08)
np->stats.collisions++; np->stats.collisions++;
#endif #endif
/* Restart the Tx */ /* Restart the Tx */
writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
} }
@@ -827,16 +823,8 @@ receive_packet (struct net_device *dev)
{ {
struct netdev_private *np = (struct netdev_private *) dev->priv; struct netdev_private *np = (struct netdev_private *) dev->priv;
int entry = np->cur_rx % RX_RING_SIZE; int entry = np->cur_rx % RX_RING_SIZE;
int cnt = np->old_rx + RX_RING_SIZE - np->cur_rx; int cnt = 30;
int rx_shift;
spin_lock (&np->rx_lock);
if (np->old_rx > RX_RING_SIZE) {
rx_shift = RX_RING_SIZE;
np->old_rx -= rx_shift;
np->cur_rx -= rx_shift;
}
DEBUG_RFD_DUMP (np, 1);
/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
while (1) { while (1) {
struct netdev_desc *desc = &np->rx_ring[entry]; struct netdev_desc *desc = &np->rx_ring[entry];
@@ -852,20 +840,19 @@ receive_packet (struct net_device *dev)
frame_status = le64_to_cpu (desc->status); frame_status = le64_to_cpu (desc->status);
if (--cnt < 0) if (--cnt < 0)
break; break;
DEBUG_PKT_DUMP (np, pkt_len);
pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz, pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
/* Update rx error statistics, drop packet. */ /* Update rx error statistics, drop packet. */
if (frame_status & 0x003f0000) { if (frame_status & RFS_Errors) {
np->stats.rx_errors++; np->stats.rx_errors++;
if (frame_status & 0x00300000) if (frame_status & (RxRuntFrame | RxLengthError))
np->stats.rx_length_errors++; np->stats.rx_length_errors++;
if (frame_status & 0x00010000) if (frame_status & RxFCSError)
np->stats.rx_fifo_errors++;
if (frame_status & 0x00060000)
np->stats.rx_frame_errors++;
if (frame_status & 0x00080000)
np->stats.rx_crc_errors++; np->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
np->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
np->stats.rx_fifo_errors++;
} else { } else {
struct sk_buff *skb; struct sk_buff *skb;
@@ -893,17 +880,17 @@ receive_packet (struct net_device *dev)
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
} }
#endif #endif
netif_rx (skb); netif_rx (skb);
dev->last_rx = jiffies; dev->last_rx = jiffies;
} }
entry = (++np->cur_rx) % RX_RING_SIZE; entry = (entry + 1) % RX_RING_SIZE;
} }
spin_lock(&np->rx_lock);
np->cur_rx = entry;
/* Re-allocate skbuffs to fill the descriptor ring */ /* Re-allocate skbuffs to fill the descriptor ring */
for (; np->cur_rx - np->old_rx > 0; np->old_rx++) { entry = np->old_rx;
while (entry != np->cur_rx) {
struct sk_buff *skb; struct sk_buff *skb;
entry = np->old_rx % RX_RING_SIZE;
/* Dropped packets don't need to re-allocate */ /* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) { if (np->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb (np->rx_buf_sz); skb = dev_alloc_skb (np->rx_buf_sz);
@@ -927,11 +914,9 @@ receive_packet (struct net_device *dev)
np->rx_ring[entry].fraginfo |= np->rx_ring[entry].fraginfo |=
cpu_to_le64 (np->rx_buf_sz) << 48; cpu_to_le64 (np->rx_buf_sz) << 48;
np->rx_ring[entry].status = 0; np->rx_ring[entry].status = 0;
/* RxDMAPollNow */ entry = (entry + 1) % RX_RING_SIZE;
writel (readl (dev->base_addr + DMACtrl) | 0x00000010,
dev->base_addr + DMACtrl);
} }
DEBUG_RFD_DUMP (np, 2); np->old_rx = entry;
spin_unlock(&np->rx_lock); spin_unlock(&np->rx_lock);
return 0; return 0;
} }
@@ -943,14 +928,6 @@ rio_error (struct net_device *dev, int int_status)
struct netdev_private *np = dev->priv; struct netdev_private *np = dev->priv;
u16 macctrl; u16 macctrl;
/* Stop the down counter and recovery the interrupt */
if (int_status & IntRequested) {
writew (0, ioaddr + IntEnable);
writel (0, ioaddr + CountDown);
/* Enable default interrupts */
EnableInt ();
}
/* Link change event */ /* Link change event */
if (int_status & LinkEvent) { if (int_status & LinkEvent) {
if (mii_wait_link (dev, 10) == 0) { if (mii_wait_link (dev, 10) == 0) {
@@ -960,14 +937,19 @@ rio_error (struct net_device *dev, int int_status)
else else
mii_get_media (dev); mii_get_media (dev);
macctrl = 0; macctrl = 0;
macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0; macctrl |= (np->full_duplex) ? DuplexSelect : 0;
macctrl |= (np->tx_flow) ? macctrl |= (np->tx_flow) ?
TxFlowControlEnable : 0; TxFlowControlEnable : 0;
macctrl |= (np->rx_flow) ? macctrl |= (np->rx_flow) ?
RxFlowControlEnable : 0; RxFlowControlEnable : 0;
writew(macctrl, ioaddr + MACCtrl); writew(macctrl, ioaddr + MACCtrl);
np->link_status = 1;
netif_carrier_on(dev);
} else { } else {
printk (KERN_INFO "%s: Link off\n", dev->name); printk (KERN_INFO "%s: Link off\n", dev->name);
np->link_status = 0;
netif_carrier_off(dev);
} }
} }
@@ -991,41 +973,100 @@ get_stats (struct net_device *dev)
{ {
long ioaddr = dev->base_addr; long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv; struct netdev_private *np = dev->priv;
u16 temp1;
u16 temp2;
int i; int i;
unsigned int stat_reg;
/* All statistics registers need to be acknowledged, /* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */ else statistic overflow could cause problems */
np->stats.rx_packets += readl (ioaddr + FramesRcvOk); np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
np->stats.tx_packets += readl (ioaddr + FramesXmtOk); np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
temp1 = readw (ioaddr + FrameLostRxError);
np->stats.rx_errors += temp1; np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
np->stats.rx_missed_errors += temp1; np->stats.collisions += readl (ioaddr + SingleColFrames)
np->stats.tx_dropped += readw (ioaddr + FramesAbortXSColls); + readl (ioaddr + MultiColFrames);
temp1 = readl (ioaddr + SingleColFrames) +
readl (ioaddr + MultiColFrames) + readl (ioaddr + LateCollisions); /* detailed tx errors */
temp2 = readw (ioaddr + CarrierSenseErrors); stat_reg = readw (ioaddr + FramesAbortXSColls);
np->stats.tx_carrier_errors += temp2; np->stats.tx_aborted_errors += stat_reg;
np->stats.tx_errors += readw (ioaddr + FramesWEXDeferal) + np->stats.tx_errors += stat_reg;
readl (ioaddr + FramesWDeferredXmt) + temp2;
stat_reg = readw (ioaddr + CarrierSenseErrors);
/* detailed rx_error */ np->stats.tx_carrier_errors += stat_reg;
np->stats.rx_length_errors += readw (ioaddr + FrameTooLongErrors); np->stats.tx_errors += stat_reg;
np->stats.rx_crc_errors += readw (ioaddr + FrameCheckSeqError);
/* Clear all other statistic register. */ /* Clear all other statistic register. */
readw (ioaddr + InRangeLengthErrors); readl (ioaddr + McstOctetXmtOk);
readw (ioaddr + MacControlFramesXmtd);
readw (ioaddr + BcstFramesXmtdOk); readw (ioaddr + BcstFramesXmtdOk);
readl (ioaddr + McstFramesXmtdOk); readl (ioaddr + McstFramesXmtdOk);
readw (ioaddr + BcstFramesRcvdOk);
readw (ioaddr + MacControlFramesRcvd);
readw (ioaddr + FrameTooLongErrors);
readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + FramesCheckSeqErrors);
readw (ioaddr + FramesLostRxErrors);
readl (ioaddr + McstOctetXmtOk);
readl (ioaddr + BcstOctetXmtOk); readl (ioaddr + BcstOctetXmtOk);
readl (ioaddr + McstFramesXmtdOk);
readl (ioaddr + FramesWDeferredXmt);
readl (ioaddr + LateCollisions);
readw (ioaddr + BcstFramesXmtdOk);
readw (ioaddr + MacControlFramesXmtd);
readw (ioaddr + FramesWEXDeferal);
for (i = 0x100; i <= 0x150; i += 4)
readl (ioaddr + i);
readw (ioaddr + TxJumboFrames);
readw (ioaddr + RxJumboFrames);
readw (ioaddr + TCPCheckSumErrors);
readw (ioaddr + UDPCheckSumErrors);
readw (ioaddr + IPCheckSumErrors);
return &np->stats;
}
static int
clear_stats (struct net_device *dev)
{
long ioaddr = dev->base_addr;
int i;
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
readl (ioaddr + FramesRcvOk);
readl (ioaddr + FramesXmtOk);
readl (ioaddr + OctetRcvOk);
readl (ioaddr + OctetXmtOk);
readl (ioaddr + McstFramesRcvdOk);
readl (ioaddr + SingleColFrames);
readl (ioaddr + MultiColFrames);
readl (ioaddr + LateCollisions);
/* detailed rx errors */
readw (ioaddr + FrameTooLongErrors);
readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + FramesCheckSeqErrors);
readw (ioaddr + FramesLostRxErrors);
/* detailed tx errors */
readw (ioaddr + FramesAbortXSColls);
readw (ioaddr + CarrierSenseErrors);
/* Clear all other statistic register. */
readl (ioaddr + McstOctetXmtOk); readl (ioaddr + McstOctetXmtOk);
readw (ioaddr + BcstFramesXmtdOk);
readl (ioaddr + McstFramesXmtdOk);
readw (ioaddr + BcstFramesRcvdOk);
readw (ioaddr + MacControlFramesRcvd); readw (ioaddr + MacControlFramesRcvd);
readw (ioaddr + BcstFramesRcvOk); readl (ioaddr + McstOctetXmtOk);
readl (ioaddr + McstFramesRcvOk); readl (ioaddr + BcstOctetXmtOk);
readl (ioaddr + BcstOctetRcvOk); readl (ioaddr + McstFramesXmtdOk);
readl (ioaddr + FramesWDeferredXmt);
readw (ioaddr + BcstFramesXmtdOk);
readw (ioaddr + MacControlFramesXmtd);
readw (ioaddr + FramesWEXDeferal);
for (i = 0x100; i <= 0x150; i += 4) for (i = 0x100; i <= 0x150; i += 4)
readl (ioaddr + i); readl (ioaddr + i);
@@ -1034,9 +1075,10 @@ get_stats (struct net_device *dev)
readw (ioaddr + TCPCheckSumErrors); readw (ioaddr + TCPCheckSumErrors);
readw (ioaddr + UDPCheckSumErrors); readw (ioaddr + UDPCheckSumErrors);
readw (ioaddr + IPCheckSumErrors); readw (ioaddr + IPCheckSumErrors);
return &np->stats; return 0;
} }
int int
change_mtu (struct net_device *dev, int new_mtu) change_mtu (struct net_device *dev, int new_mtu)
{ {
@@ -1111,51 +1153,35 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int phy_addr; int phy_addr;
struct netdev_private *np = dev->priv; struct netdev_private *np = dev->priv;
struct mii_data *miidata = (struct mii_data *) &rq->ifr_data; struct mii_data *miidata = (struct mii_data *) &rq->ifr_data;
#ifdef RIO_DEBUG
struct ioctl_data *iodata = (struct ioctl_data *) (rq->ifr_data);
#endif
u16 *data = (u16 *) & rq->ifr_data;
struct netdev_desc *desc; struct netdev_desc *desc;
int i; int i;
phy_addr = np->phy_addr; phy_addr = np->phy_addr;
switch (cmd) { switch (cmd) {
case SIOCDEVPRIVATE: case SIOCDEVPRIVATE:
#ifdef RIO_DEBUG
if (rio_ioctl_ext (dev, iodata) != 0)
return -EOPNOTSUPP;
break; break;
#else
return -EOPNOTSUPP;
#endif
case SIOCDEVPRIVATE + 1: case SIOCDEVPRIVATE + 1:
miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num); miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
break; break;
case SIOCDEVPRIVATE + 2: case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value); mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
break; break;
case SIOCDEVPRIVATE + 3: case SIOCDEVPRIVATE + 3:
np->rx_debug = (data[0] <= 7) ? data[0] : 0;
printk ("rx_debug = %d\n", np->rx_debug);
break; break;
case SIOCDEVPRIVATE + 4: case SIOCDEVPRIVATE + 4:
np->tx_debug = (data[0] <= 7) ? data[0] : 0;
printk ("tx_debug = %d\n", np->tx_debug);
break; break;
case SIOCDEVPRIVATE + 5: case SIOCDEVPRIVATE + 5:
np->tx_full = 1;
netif_stop_queue (dev); netif_stop_queue (dev);
break; break;
case SIOCDEVPRIVATE + 6: case SIOCDEVPRIVATE + 6:
np->tx_full = 0;
netif_wake_queue (dev); netif_wake_queue (dev);
break; break;
case SIOCDEVPRIVATE + 7: case SIOCDEVPRIVATE + 7:
printk printk
("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n", ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
np->tx_full, np->cur_tx, np->old_tx, np->cur_rx, netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
np->old_rx); np->old_rx);
break; break;
case SIOCDEVPRIVATE + 8: case SIOCDEVPRIVATE + 8:
@@ -1163,7 +1189,8 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
for (i = 0; i < TX_RING_SIZE; i++) { for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i]; desc = &np->tx_ring[i];
printk printk
("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x", ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
i,
(u32) (np->tx_ring_dma + i * sizeof (*desc)), (u32) (np->tx_ring_dma + i * sizeof (*desc)),
(u32) desc->next_desc, (u32) desc->next_desc,
(u32) desc->status, (u32) (desc->fraginfo >> 32), (u32) desc->status, (u32) (desc->fraginfo >> 32),
@@ -1172,110 +1199,17 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
} }
printk ("\n"); printk ("\n");
break; break;
default:
return -EOPNOTSUPP;
}
return 0;
}
#ifdef RIO_DEBUG
int
rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata)
{
struct netdev_private *np = dev->priv;
int phy_addr = np->phy_addr;
u32 hi, lo;
int i;
BMCR_t bmcr;
BMSR_t bmsr;
if (iodata == NULL)
goto invalid_cmd;
if (strcmp (iodata->signature, "rio") != 0)
goto invalid_cmd;
switch (iodata->cmd) {
case 0:
for (i = 0; i < TX_RING_SIZE; i++) {
hi = np->tx_ring[i].status >> 32;
lo = np->tx_ring[i].status;
printk ("TFC=%08x %08x \n", hi, lo);
}
break;
case 1:
for (i = 0; i < RX_RING_SIZE; i++) {
hi = np->rx_ring[i].status >> 32;
lo = np->rx_ring[i].status;
printk ("RFS=%08x %08x \n", hi, lo);
}
break;
case 2:
break;
case 3:
if (iodata->data != NULL)
np->tx_debug = iodata->data[0];
break;
case 4:
/* Soft reset PHY */
mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
bmcr.image = 0;
bmcr.bits.an_enable = 1;
bmcr.bits.reset = 1;
mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
break;
case 5:
mii_write (dev, phy_addr, MII_BMCR, 0x1940);
mdelay (10);
mii_write (dev, phy_addr, MII_BMCR, 0x1940);
mdelay (100); /* wait a certain time */
break;
case 6:
/* 5) Set media and Power Up */
bmcr.image = 0;
bmcr.bits.power_down = 1;
if (np->an_enable) {
bmcr.bits.an_enable = 1;
} else {
if (np->speed == 100) {
bmcr.bits.speed100 = 1;
bmcr.bits.speed1000 = 0;
printk ("Manual 100 Mbps, ");
} else if (np->speed == 10) {
bmcr.bits.speed100 = 0;
bmcr.bits.speed1000 = 0;
printk ("Manual 10 Mbps, ");
}
if (np->full_duplex) {
bmcr.bits.duplex_mode = 1;
printk ("Full duplex. \n");
} else {
bmcr.bits.duplex_mode = 0;
printk ("Half duplex.\n");
}
}
mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
break;
case 7:
bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
printk ("BMCR=%x BMSR=%x LinkUp=%d\n",
bmcr.image, bmsr.image, bmsr.bits.link_status);
break;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
return 0; return 0;
invalid_cmd:
return -1;
} }
#endif
#define EEP_READ 0x0200 #define EEP_READ 0x0200
#define EEP_BUSY 0x8000 #define EEP_BUSY 0x8000
/* Read the EEPROM word */ /* Read the EEPROM word */
static int __devinit int
read_eeprom (long ioaddr, int eep_addr) read_eeprom (long ioaddr, int eep_addr)
{ {
int i = 1000; int i = 1000;
@@ -1441,10 +1375,11 @@ mii_get_media (struct net_device *dev)
printk (KERN_INFO "Auto 10 Mbps, Half duplex\n"); printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
} }
if (negotiate.bits.pause) { if (negotiate.bits.pause) {
np->tx_flow = 1; np->tx_flow &= 1;
np->rx_flow = 1; np->rx_flow &= 1;
} else if (negotiate.bits.asymmetric) { } else if (negotiate.bits.asymmetric) {
np->rx_flow = 1; np->tx_flow = 0;
np->rx_flow &= 1;
} }
/* else tx_flow, rx_flow = user select */ /* else tx_flow, rx_flow = user select */
} else { } else {
@@ -1593,10 +1528,11 @@ mii_get_media_pcs (struct net_device *dev)
np->full_duplex = 0; np->full_duplex = 0;
} }
if (negotiate.bits.pause) { if (negotiate.bits.pause) {
np->tx_flow = 1; np->tx_flow &= 1;
np->rx_flow = 1; np->rx_flow &= 1;
} else if (negotiate.bits.asymmetric) { } else if (negotiate.bits.asymmetric) {
np->rx_flow = 1; np->tx_flow = 0;
np->rx_flow &= 1;
} }
/* else tx_flow, rx_flow = user select */ /* else tx_flow, rx_flow = user select */
} else { } else {
dl2k.h
@@ -119,13 +119,13 @@ enum dl2x_offsets {
McstOctetRcvOk = 0xac, McstOctetRcvOk = 0xac,
BcstOctetRcvOk = 0xb0, BcstOctetRcvOk = 0xb0,
FramesRcvOk = 0xb4, FramesRcvOk = 0xb4,
McstFramesRcvOk = 0xb8, McstFramesRcvdOk = 0xb8,
BcstFramesRcvOk = 0xbe, BcstFramesRcvdOk = 0xbe,
MacControlFramesRcvd = 0xc6, MacControlFramesRcvd = 0xc6,
FrameTooLongErrors = 0xc8, FrameTooLongErrors = 0xc8,
InRangeLengthErrors = 0xca, InRangeLengthErrors = 0xca,
FrameCheckSeqError = 0xcc, FramesCheckSeqErrors = 0xcc,
FrameLostRxError = 0xce, FramesLostRxErrors = 0xce,
OctetXmtOk = 0xd0, OctetXmtOk = 0xd0,
McstOctetXmtOk = 0xd4, McstOctetXmtOk = 0xd4,
BcstOctetXmtOk = 0xd8, BcstOctetXmtOk = 0xd8,
@@ -264,6 +264,7 @@ enum RFS_bits {
FrameEnd = 0x40000000, FrameEnd = 0x40000000,
RFDDone = 0x80000000, RFDDone = 0x80000000,
TCIShift = 32, TCIShift = 32,
RFS_Errors = 0x003f0000,
}; };
#define MII_RESET_TIME_OUT 10000 #define MII_RESET_TIME_OUT 10000
@@ -648,7 +649,7 @@ struct netdev_private {
dma_addr_t tx_ring_dma; dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma; dma_addr_t rx_ring_dma;
struct pci_dev *pdev; struct pci_dev *pdev;
spinlock_t lock; spinlock_t tx_lock;
spinlock_t rx_lock; spinlock_t rx_lock;
struct net_device_stats stats; struct net_device_stats stats;
unsigned int rx_buf_sz; /* Based on MTU+slack. */ unsigned int rx_buf_sz; /* Based on MTU+slack. */
@@ -657,7 +658,6 @@ struct netdev_private {
unsigned int chip_id; /* PCI table chip id */ unsigned int chip_id; /* PCI table chip id */
unsigned int rx_coalesce; /* Maximum frames each RxDMAComplete intr */ unsigned int rx_coalesce; /* Maximum frames each RxDMAComplete intr */
unsigned int rx_timeout; /* Wait time between RxDMAComplete intr */ unsigned int rx_timeout; /* Wait time between RxDMAComplete intr */
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */ unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int an_enable:2; /* Auto-Negotiated Enable */ unsigned int an_enable:2; /* Auto-Negotiated Enable */
unsigned int jumbo:1; /* Jumbo frame enable */ unsigned int jumbo:1; /* Jumbo frame enable */
@@ -665,6 +665,7 @@ struct netdev_private {
unsigned int tx_flow:1; /* Tx flow control enable */ unsigned int tx_flow:1; /* Tx flow control enable */
unsigned int rx_flow:1; /* Rx flow control enable */ unsigned int rx_flow:1; /* Rx flow control enable */
unsigned int phy_media:1; /* 1: fiber, 0: copper */ unsigned int phy_media:1; /* 1: fiber, 0: copper */
unsigned int link_status:1; /* Current link status */
unsigned char pci_rev_id; /* PCI revision ID */ unsigned char pci_rev_id; /* PCI revision ID */
struct netdev_desc *last_tx; /* Last Tx descriptor used. */ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */ unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
@@ -677,8 +678,6 @@ struct netdev_private {
u16 advertising; /* NWay media advertisement */ u16 advertising; /* NWay media advertisement */
u16 negotiate; /* Negotiated media */ u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */ int phy_addr; /* PHY addresses. */
int tx_debug;
int rx_debug;
}; };
/* The station address location in the EEPROM. */ /* The station address location in the EEPROM. */
@@ -707,114 +706,4 @@ MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
#define DEFAULT_RXT 750 #define DEFAULT_RXT 750
#define DEFAULT_TXC 1 #define DEFAULT_TXC 1
#define MAX_TXC 8 #define MAX_TXC 8
#ifdef RIO_DEBUG
#define DEBUG_TFD_DUMP(x) debug_tfd_dump(x)
#define DEBUG_RFD_DUMP(x,flag) debug_rfd_dump(x,flag)
#define DEBUG_PKT_DUMP(x,len) debug_pkt_dump(x,len)
#define DEBUG_PRINT printk
static inline void
debug_tfd_dump (struct netdev_private *np)
{
int i;
struct netdev_desc *desc;
if (np->tx_debug == 6) {
printk ("TFDone Dump: ");
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
if ((desc->fraginfo & 0xffffffffff) == 0)
printk ("X");
else
printk ("%d", (desc->status & TFDDone) ? 1 : 0);
}
printk ("\n");
}
if (np->tx_debug == 5) {
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
(u32) np->tx_ring_dma + i * sizeof (*desc),
(u32) desc->next_desc, (u32) desc->status,
(u32) (desc->fraginfo >> 32),
(u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
}
}
static inline void
debug_rfd_dump (struct netdev_private *np, int flag)
{
int i;
struct netdev_desc *desc;
int entry = np->cur_rx % RX_RING_SIZE;
if (np->rx_debug == 5) {
for (i = 0; i < RX_RING_SIZE; i++) {
desc = &np->rx_ring[i];
printk
("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
(u32) np->rx_ring_dma + i * sizeof (*desc),
(u32) desc->next_desc, (u32) desc->status,
(u32) (desc->fraginfo >> 32),
(u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
}
if (np->rx_debug == 6) {
if (flag == 1)
printk ("RFDone Dump: ");
else if (flag == 2)
printk ("Re-Filling: ");
for (i = 0; i < RX_RING_SIZE; i++) {
desc = &np->rx_ring[i];
printk ("%d", (desc->status & RFDDone) ? 1 : 0);
}
printk ("\n");
}
if (np->rx_debug == 7) {
printk (" In rcv_pkt(), entry %d status %4.4x %4.4x.\n",
entry, (u32) (np->rx_ring[entry].status >> 32),
(u32) np->rx_ring[entry].status);
}
}
static inline void
debug_pkt_dump (struct netdev_private *np, int pkt_len)
{
int entry = np->cur_rx % RX_RING_SIZE;
struct netdev_desc *desc = &np->rx_ring[entry];
u64 frame_status = le64_to_cpu (desc->status);
unsigned char *pchar;
unsigned char *phead;
int i;
if (np->rx_debug == 4) {
printk (" rcv_pkt: status was %4.4x %4.4x.\n",
(u32) (frame_status >> 32), (u32) frame_status);
}
if (np->rx_debug == 7) {
#error Please convert me to Documentation/DMA-mapping.txt
phead =
bus_to_virt (le64_to_cpu (desc->fraginfo & 0xffffffffff));
for (pchar = phead, i = 0; i < pkt_len; i++, pchar++) {
printk ("%02x ", *pchar);
if ((i + 1) % 20 == 0)
printk ("\n");
}
}
}
#else
#define DEBUG_TFD_DUMP(x) {}
#define DEBUG_RFD_DUMP(x,flag) {}
#define DEBUG_PKT_DUMP(x,len) {}
#define DEBUG_PRINT() {}
#endif
#endif /* __DL2K_H__ */ #endif /* __DL2K_H__ */