Commit 6382e49f authored by Edward Peng, committed by Jeff Garzik

dl2k gige net driver updates:

* fix carrier and rx frame error miscounts
* fix vlan bug
* h/w dma bug workaround
* ISR chip-based timer scheme
parent 4e130235
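
In outline, the ISR chip-based timer scheme trades one interrupt per transmitted frame for one TxDMAComplete per tx_coalesce frames, with the chip's CountDown register as a backstop that raises IntRequested for the untagged tail of a burst. Below is a toy user-space model of that flow — a sketch only: TX_COALESCE mirrors the new driver default of 16, while the burst size and countdown value are made-up illustration numbers, not driver values.

#include <stdio.h>

#define TX_COALESCE 16	/* mirrors the new tx_coalesce default */

int main (void)
{
	int sent = 35, reaped = 0, irqs = 0, countdown = 0;
	int i;

	for (i = 0; i < sent; i++) {
		/* Only every TX_COALESCE-th descriptor carries
		 * TxDMAIndicate, so only it raises TxDMAComplete. */
		if (i % TX_COALESCE == 0) {
			irqs++;		/* "ISR" runs */
			reaped = i + 1;	/* frees finished descriptors */
		}
		countdown = 100;	/* re-armed while work is pending */
	}
	/* Burst over: CountDown expires, raises IntRequested, and the
	 * "ISR" reaps the tail that never got a TxDMAComplete. */
	if (countdown && reaped < sent) {
		irqs++;
		reaped = sent;
	}
	printf ("%d frames sent, %d interrupts, %d reaped\n",
		sent, irqs, reaped);
	return 0;
}

This prints "35 frames sent, 4 interrupts, 35 reaped": three coalesced Tx interrupts plus one timer-driven cleanup instead of 35 per-frame interrupts.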
dl2k.c
@@ -29,24 +29,28 @@
1.08 2002/01/17 Fixed the multicast bug.
1.09 2002/03/07 Moved rx-poll-now to the re-fill loop.
Added rio_timer() to watch rx buffers.
1.10 2002/04/16 Fixed miscount of carrier error.
1.11 2002/05/23 Added ISR schedule scheme.
Fixed miscount of rx frame error for DGE-550SX.
Fixed VLAN bug.
*/
#include "dl2k.h"
static char version[] __devinitdata =
KERN_INFO "D-Link DL2000-based linux driver v1.09 2002/03/07\n";
KERN_INFO "D-Link DL2000-based linux driver v1.11 2002/05/23\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow[MAX_UNITS];
static int rx_flow[MAX_UNITS];
static int tx_flow=-1;
static int rx_flow=-1;
static int copy_thresh;
static int rx_coalesce; /* Rx frame count each interrupt */
static int rx_timeout; /* Rx DMA wait time in 64ns increments */
static int tx_coalesce = DEFAULT_TXC; /* HW xmit count each TxComplete [1-8] */
static int rx_coalesce=10; /* Rx frame count each interrupt */
static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
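/* Note on the new defaults: an Rx interrupt fires after rx_coalesce (10)
 * frames or after rx_timeout * 640 ns = 200 * 640 ns = 128 us of Rx DMA
 * wait, whichever comes first; tx_coalesce = 16 yields one TxDMAComplete
 * per 16 frames at speeds above 10 Mbps. */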
MODULE_AUTHOR ("Edward Peng");
@@ -56,16 +60,16 @@ MODULE_PARM (mtu, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (media, "1-" __MODULE_STRING (MAX_UNITS) "s");
MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (jumbo, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (tx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (rx_flow, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (tx_flow, "i");
MODULE_PARM (rx_flow, "i");
MODULE_PARM (copy_thresh, "i");
MODULE_PARM (rx_coalesce, "i"); /* Rx frame count each interrupt */
MODULE_PARM (rx_timeout, "i"); /* Rx DMA wait time in 64ns increments */
MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxComplete [1-8] */
MODULE_PARM (tx_coalesce, "i"); /* HW xmit count each TxDMAComplete */
/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxComplete| \
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
UpdateStats | LinkEvent)
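/* Note: TxComplete (a per-frame MAC event) drops out of the default mask;
 * TxDMAComplete is evidently raised only by descriptors tagged with
 * TxDMAIndicate, i.e. once per tx_coalesce frames, which is what makes
 * the coalescing scheme work. */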
#define EnableInt() \
writew(DEFAULT_INTR, ioaddr + IntEnable)
@@ -75,16 +79,18 @@ static int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void tx_timeout (struct net_device *dev);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static int start_xmit (struct sk_buff *skb, struct net_device *dev);
static void rio_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
@@ -98,9 +104,6 @@ static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
u16 data);
#ifdef RIO_DEBUG
static int rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata);
#endif
static int __devinit
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -109,7 +112,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
struct netdev_private *np;
static int card_idx;
int chip_idx = ent->driver_data;
int err, irq = pdev->irq;
int err, irq;
long ioaddr;
static int version_printed;
void *ring_space;
@@ -122,6 +125,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
irq = pdev->irq;
err = pci_request_regions (pdev, "dl2k");
if (err)
goto err_out_disable;
@@ -149,7 +153,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = dev->priv;
np->chip_id = chip_idx;
np->pdev = pdev;
spin_lock_init (&np->lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
/* Parse manual configuration */
@@ -199,17 +203,18 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
}
np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
vlan[card_idx] : 0;
if (rx_coalesce != 0 && rx_timeout != 0) {
if (rx_coalesce > 0 && rx_timeout > 0) {
np->rx_coalesce = rx_coalesce;
np->rx_timeout = rx_timeout;
np->coalesce = 1;
}
np->tx_flow = (tx_flow[card_idx]) ? 1 : 0;
np->rx_flow = (rx_flow[card_idx]) ? 1 : 0;
np->tx_flow = (tx_flow == 0) ? 0 : 1;
np->rx_flow = (rx_flow == 0) ? 0 : 1;
if (tx_coalesce < 1)
tx_coalesce = 1;
if (tx_coalesce > 8)
tx_coalesce = 8;
if (tx_coalesce > TX_RING_SIZE-1)
tx_coalesce = TX_RING_SIZE - 1;
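/* Note: the ceiling rises from the old MAX_TXC (8) to TX_RING_SIZE - 1,
 * presumably because coalescing is now done by tagging every Nth
 * descriptor with TxDMAIndicate rather than by the hardware's [1-8]
 * TxComplete count. */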
}
dev->open = &rio_open;
dev->hard_start_xmit = &start_xmit;
@@ -217,7 +222,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->get_stats = &get_stats;
dev->set_multicast_list = &set_multicast;
dev->do_ioctl = &rio_ioctl;
dev->tx_timeout = &tx_timeout;
dev->tx_timeout = &rio_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
dev->change_mtu = &change_mtu;
#if 0
@@ -247,6 +252,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
/* Fiber device? */
np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
/* Set media and reset PHY */
if (np->phy_media) {
/* default 1000mbps_fd for fiber devices */
@@ -281,6 +287,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->name, np->name,
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
if (tx_coalesce > 1)
printk(KERN_INFO "tx_coalesce:\t%d packets\n",
tx_coalesce);
if (np->coalesce)
printk(KERN_INFO "rx_coalesce:\t%d packets\n"
KERN_INFO "rx_timeout: \t%d ns\n",
np->rx_coalesce, np->rx_timeout*640);
if (np->vlan)
printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
return 0;
err_out_unmap_rx:
@@ -327,7 +342,7 @@ find_miiphy (struct net_device *dev)
return 0;
}
static int __devinit
int
parse_eeprom (struct net_device *dev)
{
int i, j;
@@ -346,7 +361,7 @@ parse_eeprom (struct net_device *dev)
}
/* Check CRC */
crc = ~ether_crc_le(256 - 4, sromdata);
if (psrom->crc != crc) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name);
return -1;
@@ -431,8 +446,10 @@ rio_open (struct net_device *dev)
writeb (0xff, ioaddr + TxDMAPollPeriod);
writeb (0x30, ioaddr + RxDMABurstThresh);
writeb (0x30, ioaddr + RxDMAUrgentThresh);
netif_start_queue (dev);
writel (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl);
/* clear statistics */
clear_stats (dev);
/* VLAN supported */
if (np->vlan) {
/* priority field in RxDMAIntCtrl */
@@ -451,15 +468,20 @@ rio_open (struct net_device *dev)
/* Enable default interrupts */
EnableInt ();
/* clear statistics */
get_stats (dev);
init_timer (&np->timer);
np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long) dev;
np->timer.function = &rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
ioaddr + MACCtrl);
netif_start_queue (dev);
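/* Note: StatsEnable/RxEnable/TxEnable and netif_start_queue() now come
 * last, after interrupts are enabled and the watchdog timer is armed,
 * so no traffic arrives before the driver is ready to service it. */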
return 0;
}
static void
rio_timer (unsigned long data)
{
@@ -469,10 +491,10 @@ rio_timer (unsigned long data)
int next_tick = 1*HZ;
unsigned long flags;
spin_lock_irqsave(&np->rx_lock, flags);
/* Recover rx ring exhausted error */
if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
printk(KERN_INFO "Try to recover rx ring exhausted...\n");
spin_lock_irqsave(&np->rx_lock, flags);
/* Re-allocate skbuffs to fill the descriptor ring */
for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
struct sk_buff *skb;
@@ -500,43 +522,22 @@ rio_timer (unsigned long data)
cpu_to_le64 (np->rx_buf_sz) << 48;
np->rx_ring[entry].status = 0;
} /* end for */
spin_unlock_irqrestore (&np->rx_lock, flags);
} /* end if */
spin_unlock_irqrestore (&np->rx_lock, flags);
np->timer.expires = jiffies + next_tick;
add_timer(&np->timer);
}
static void
tx_timeout (struct net_device *dev)
rio_tx_timeout (struct net_device *dev)
{
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
dev->name, readl (ioaddr + TxStatus));
/* Free used tx skbuffs */
for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (skb);
np->tx_skbuff[entry] = 0;
}
rio_free_tx(dev, 0);
dev->if_port = 0;
dev->trans_start = jiffies;
np->stats.tx_errors++;
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
np->tx_full = 0;
netif_wake_queue (dev);
}
}
/* allocate and initialize Tx and Rx descriptors */
@@ -546,7 +547,6 @@ alloc_list (struct net_device *dev)
struct netdev_private *np = dev->priv;
int i;
np->tx_full = 0;
np->cur_rx = np->cur_tx = 0;
np->old_rx = np->old_tx = 0;
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
@@ -605,18 +605,17 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
struct netdev_desc *txdesc;
unsigned entry;
u32 ioaddr;
int tx_shift;
unsigned long flags;
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
dev_kfree_skb(skb);
return 0;
}
ioaddr = dev->base_addr;
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry];
/* Set TFDDone so TxDMA does not gather this descriptor */
txdesc->status = cpu_to_le64 (TFDDone);
txdesc->status |=
cpu_to_le64 (entry | WordAlignDisable | (1 << FragCountShift));
#if 0
if (skb->ip_summed == CHECKSUM_HW) {
txdesc->status |=
@@ -630,31 +629,33 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
(cpu_to_le64 (np->vlan) << 32) |
(cpu_to_le64 (skb->priority) << 45);
}
/* Send one packet at a time in 10Mbps mode */
/* Tx coalescing must not exceed 8 */
if (entry % tx_coalesce == 0 || np->speed == 10)
txdesc->status |= cpu_to_le64 (TxIndicate);
txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
skb->len,
PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
/* Clear TFDDone, then TxDMA starts sending this descriptor */
txdesc->status &= ~cpu_to_le64 (TFDDone);
DEBUG_TFD_DUMP (np);
/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
* Workaround: Always use 1 descriptor in 10Mbps mode */
if (entry % tx_coalesce == 0 || np->speed == 10)
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
WordAlignDisable |
TxDMAIndicate |
(1 << FragCountShift));
else
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
WordAlignDisable |
(1 << FragCountShift));
/* TxDMAPollNow */
writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
np->cur_tx++;
if (np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1 && np->speed != 10) {
/* Schedule ISR */
writel(10000, ioaddr + CountDown);
np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 && np->speed != 10) {
/* do nothing */
} else {
spin_lock_irqsave(&np->lock, flags);
np->tx_full = 1;
} else if (!netif_queue_stopped(dev)) {
netif_stop_queue (dev);
spin_unlock_irqrestore (&np->lock, flags);
}
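/* Note: cur_tx/old_tx are now kept modulo TX_RING_SIZE, so their raw
 * difference can go negative after cur_tx wraps; the "+ TX_RING_SIZE
 * ... % TX_RING_SIZE" bias above keeps the in-flight count within
 * [0, TX_RING_SIZE). */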
/* The first TFDListPtr */
@@ -664,14 +665,6 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
writel (0, dev->base_addr + TFDListPtr1);
}
if (np->old_tx > TX_RING_SIZE) {
spin_lock_irqsave (&np->lock, flags);
tx_shift = TX_RING_SIZE;
np->old_tx -= tx_shift;
np->cur_tx -= tx_shift;
spin_unlock_irqrestore (&np->lock, flags);
}
/* NETDEV WATCHDOG timer */
dev->trans_start = jiffies;
return 0;
@@ -688,61 +681,79 @@ rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
ioaddr = dev->base_addr;
np = dev->priv;
spin_lock(&np->lock);
while (1) {
int_status = readw (ioaddr + IntStatus);
writew (int_status, ioaddr + IntStatus);
int_status &= DEFAULT_INTR;
if (int_status == 0)
if (int_status == 0 || --cnt < 0)
break;
/* Processing received packets */
if (int_status & RxDMAComplete)
receive_packet (dev);
/* TxComplete interrupt */
if ((int_status & TxComplete) || np->tx_full) {
/* TxDMAComplete interrupt */
if ((int_status & (TxDMAComplete|IntRequested))) {
int tx_status;
tx_status = readl (ioaddr + TxStatus);
if (tx_status & 0x01)
tx_error (dev, tx_status);
/* Free used tx skbuffs */
for (;np->cur_tx - np->old_tx > 0; np->old_tx++) {
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (skb);
np->tx_skbuff[entry] = 0;
}
}
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
if (np->speed != 10 || int_status & TxComplete) {
np->tx_full = 0;
netif_wake_queue (dev);
}
rio_free_tx (dev, 1);
}
/* Handle uncommon events */
if (int_status &
(IntRequested | HostError | LinkEvent | UpdateStats))
(HostError | LinkEvent | UpdateStats))
rio_error (dev, int_status);
/* If too many interrupts occur here, disable all interrupts except
IntRequested. When CountDown reaches 0, IntRequested will
be caught by rio_error() to recover the interrupts */
if (--cnt < 0) {
get_stats (dev);
writel (1, ioaddr + CountDown);
writew (IntRequested, ioaddr + IntEnable);
}
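/* Note: while tx work is still outstanding, re-arm the chip's CountDown
 * timer; on expiry it raises IntRequested, so tail descriptors that
 * carry no TxDMAIndicate are still reaped promptly. */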
if (np->cur_tx != np->old_tx)
writel (100, ioaddr + CountDown);
}
static void
rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = (struct netdev_private *) dev->priv;
int entry = np->old_tx % TX_RING_SIZE;
int tx_use = 0;
long flag = 0;
if (irq)
spin_lock_irqsave(&np->tx_lock, flag);
else
spin_lock(&np->tx_lock);
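/* Note: irq != 0 means the caller runs in interrupt context
 * (rio_interrupt, tx_error), so the irq-safe lock and
 * dev_kfree_skb_irq() variants are used; rio_tx_timeout passes 0. */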
/* Free used tx skbuffs */
while (entry != np->cur_tx) {
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
}
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
else
dev_kfree_skb (skb);
np->tx_skbuff[entry] = 0;
entry = (entry + 1) % TX_RING_SIZE;
tx_use++;
}
if (irq)
spin_unlock_irqrestore(&np->tx_lock, flag);
else
spin_unlock(&np->tx_lock);
np->old_tx = entry;
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (netif_queue_stopped(dev) &&
((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 || np->speed == 10)) {
netif_wake_queue (dev);
}
spin_unlock(&np->lock);
}
static void
@@ -755,11 +766,10 @@ tx_error (struct net_device *dev, int tx_status)
np = dev->priv;
frame_id = (tx_status & 0xffff0000) >> 16;
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
np->stats.tx_errors++;
np->stats.tx_dropped++;
/* Transmit Underrun */
if (tx_status & 0x10) {
np->stats.tx_fifo_errors++;
@@ -774,20 +784,7 @@ tx_error (struct net_device *dev, int tx_status)
break;
mdelay (1);
}
/* Free completed descriptors */
for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
int entry = np->old_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & TFDDone))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev, np->tx_ring[entry].fraginfo,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq (skb);
np->tx_skbuff[entry] = 0;
}
rio_free_tx (dev, 1);
/* Reset TFDListPtr */
writel (np->tx_ring_dma +
np->old_tx * sizeof (struct netdev_desc),
@@ -810,14 +807,13 @@ tx_error (struct net_device *dev, int tx_status)
/* Let TxStartThresh stay at its default value */
}
/* Maximum Collisions */
#ifdef ETHER_STATS
if (tx_status & 0x08)
np->stats.collisions16++;
#else
if (tx_status & 0x08)
np->stats.collisions++;
#endif
/* Restart the Tx */
writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}
@@ -827,16 +823,8 @@ receive_packet (struct net_device *dev)
{
struct netdev_private *np = (struct netdev_private *) dev->priv;
int entry = np->cur_rx % RX_RING_SIZE;
int cnt = np->old_rx + RX_RING_SIZE - np->cur_rx;
int rx_shift;
spin_lock (&np->rx_lock);
if (np->old_rx > RX_RING_SIZE) {
rx_shift = RX_RING_SIZE;
np->old_rx -= rx_shift;
np->cur_rx -= rx_shift;
}
DEBUG_RFD_DUMP (np, 1);
int cnt = 30;
/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
while (1) {
struct netdev_desc *desc = &np->rx_ring[entry];
@@ -852,20 +840,19 @@ receive_packet (struct net_device *dev)
frame_status = le64_to_cpu (desc->status);
if (--cnt < 0)
break;
DEBUG_PKT_DUMP (np, pkt_len);
pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
/* Update rx error statistics, drop packet. */
if (frame_status & 0x003f0000) {
if (frame_status & RFS_Errors) {
np->stats.rx_errors++;
if (frame_status & 0x00300000)
if (frame_status & (RxRuntFrame | RxLengthError))
np->stats.rx_length_errors++;
if (frame_status & 0x00010000)
np->stats.rx_fifo_errors++;
if (frame_status & 0x00060000)
np->stats.rx_frame_errors++;
if (frame_status & 0x00080000)
if (frame_status & RxFCSError)
np->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
np->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
np->stats.rx_fifo_errors++;
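/* Note: the np->speed != 1000 guard is the changelog's DGE-550SX fix:
 * RxAlignmentError only counts as a frame error below gigabit speed,
 * so it no longer inflates rx_frame_errors on 1000 Mbps links. */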
} else {
struct sk_buff *skb;
@@ -893,17 +880,17 @@ receive_packet (struct net_device *dev)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
#endif
netif_rx (skb);
dev->last_rx = jiffies;
}
entry = (++np->cur_rx) % RX_RING_SIZE;
entry = (entry + 1) % RX_RING_SIZE;
}
spin_lock(&np->rx_lock);
np->cur_rx = entry;
/* Re-allocate skbuffs to fill the descriptor ring */
for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
entry = np->old_rx;
while (entry != np->cur_rx) {
struct sk_buff *skb;
entry = np->old_rx % RX_RING_SIZE;
/* Dropped packets don't need to be re-allocated */
if (np->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb (np->rx_buf_sz);
@@ -927,11 +914,9 @@ receive_packet (struct net_device *dev)
np->rx_ring[entry].fraginfo |=
cpu_to_le64 (np->rx_buf_sz) << 48;
np->rx_ring[entry].status = 0;
/* RxDMAPollNow */
writel (readl (dev->base_addr + DMACtrl) | 0x00000010,
dev->base_addr + DMACtrl);
entry = (entry + 1) % RX_RING_SIZE;
}
DEBUG_RFD_DUMP (np, 2);
np->old_rx = entry;
spin_unlock(&np->rx_lock);
return 0;
}
@@ -943,14 +928,6 @@ rio_error (struct net_device *dev, int int_status)
struct netdev_private *np = dev->priv;
u16 macctrl;
/* Stop the down counter and recover the interrupts */
if (int_status & IntRequested) {
writew (0, ioaddr + IntEnable);
writel (0, ioaddr + CountDown);
/* Enable default interrupts */
EnableInt ();
}
/* Link change event */
if (int_status & LinkEvent) {
if (mii_wait_link (dev, 10) == 0) {
@@ -960,14 +937,19 @@ rio_error (struct net_device *dev, int int_status)
else
mii_get_media (dev);
macctrl = 0;
macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0;
macctrl |= (np->tx_flow) ?
TxFlowControlEnable : 0;
macctrl |= (np->rx_flow) ?
RxFlowControlEnable : 0;
writew(macctrl, ioaddr + MACCtrl);
np->link_status = 1;
netif_carrier_on(dev);
} else {
printk (KERN_INFO "%s: Link off\n", dev->name);
np->link_status = 0;
netif_carrier_off(dev);
}
}
@@ -991,41 +973,100 @@ get_stats (struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
u16 temp1;
u16 temp2;
int i;
unsigned int stat_reg;
/* All statistics registers need to be acknowledged,
otherwise statistics overflow could cause problems */
np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
temp1 = readw (ioaddr + FrameLostRxError);
np->stats.rx_errors += temp1;
np->stats.rx_missed_errors += temp1;
np->stats.tx_dropped += readw (ioaddr + FramesAbortXSColls);
temp1 = readl (ioaddr + SingleColFrames) +
readl (ioaddr + MultiColFrames) + readl (ioaddr + LateCollisions);
temp2 = readw (ioaddr + CarrierSenseErrors);
np->stats.tx_carrier_errors += temp2;
np->stats.tx_errors += readw (ioaddr + FramesWEXDeferal) +
readl (ioaddr + FramesWDeferredXmt) + temp2;
/* detailed rx errors */
np->stats.rx_length_errors += readw (ioaddr + FrameTooLongErrors);
np->stats.rx_crc_errors += readw (ioaddr + FrameCheckSeqError);
np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
np->stats.collisions += readl (ioaddr + SingleColFrames)
+ readl (ioaddr + MultiColFrames);
/* detailed tx errors */
stat_reg = readw (ioaddr + FramesAbortXSColls);
np->stats.tx_aborted_errors += stat_reg;
np->stats.tx_errors += stat_reg;
stat_reg = readw (ioaddr + CarrierSenseErrors);
np->stats.tx_carrier_errors += stat_reg;
np->stats.tx_errors += stat_reg;
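/* Note: FramesWDeferredXmt and FramesWEXDeferal no longer feed
 * tx_errors (a deferred transmission is not an error); they are read
 * below only to acknowledge the registers, per the comment above. */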
/* Clear all other statistic registers. */
readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + MacControlFramesXmtd);
readl (ioaddr + McstOctetXmtOk);
readw (ioaddr + BcstFramesXmtdOk);
readl (ioaddr + McstFramesXmtdOk);
readw (ioaddr + BcstFramesRcvdOk);
readw (ioaddr + MacControlFramesRcvd);
readw (ioaddr + FrameTooLongErrors);
readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + FramesCheckSeqErrors);
readw (ioaddr + FramesLostRxErrors);
readl (ioaddr + McstOctetXmtOk);
readl (ioaddr + BcstOctetXmtOk);
readl (ioaddr + McstFramesXmtdOk);
readl (ioaddr + FramesWDeferredXmt);
readl (ioaddr + LateCollisions);
readw (ioaddr + BcstFramesXmtdOk);
readw (ioaddr + MacControlFramesXmtd);
readw (ioaddr + FramesWEXDeferal);
for (i = 0x100; i <= 0x150; i += 4)
readl (ioaddr + i);
readw (ioaddr + TxJumboFrames);
readw (ioaddr + RxJumboFrames);
readw (ioaddr + TCPCheckSumErrors);
readw (ioaddr + UDPCheckSumErrors);
readw (ioaddr + IPCheckSumErrors);
return &np->stats;
}
static int
clear_stats (struct net_device *dev)
{
long ioaddr = dev->base_addr;
int i;
/* All statistics registers need to be acknowledged,
otherwise statistics overflow could cause problems */
readl (ioaddr + FramesRcvOk);
readl (ioaddr + FramesXmtOk);
readl (ioaddr + OctetRcvOk);
readl (ioaddr + OctetXmtOk);
readl (ioaddr + McstFramesRcvdOk);
readl (ioaddr + SingleColFrames);
readl (ioaddr + MultiColFrames);
readl (ioaddr + LateCollisions);
/* detailed rx errors */
readw (ioaddr + FrameTooLongErrors);
readw (ioaddr + InRangeLengthErrors);
readw (ioaddr + FramesCheckSeqErrors);
readw (ioaddr + FramesLostRxErrors);
/* detailed tx errors */
readw (ioaddr + FramesAbortXSColls);
readw (ioaddr + CarrierSenseErrors);
/* Clear all other statistic registers. */
readl (ioaddr + McstOctetXmtOk);
readw (ioaddr + BcstFramesXmtdOk);
readl (ioaddr + McstFramesXmtdOk);
readw (ioaddr + BcstFramesRcvdOk);
readw (ioaddr + MacControlFramesRcvd);
readw (ioaddr + BcstFramesRcvOk);
readl (ioaddr + McstFramesRcvOk);
readl (ioaddr + BcstOctetRcvOk);
readl (ioaddr + McstOctetXmtOk);
readl (ioaddr + BcstOctetXmtOk);
readl (ioaddr + McstFramesXmtdOk);
readl (ioaddr + FramesWDeferredXmt);
readw (ioaddr + BcstFramesXmtdOk);
readw (ioaddr + MacControlFramesXmtd);
readw (ioaddr + FramesWEXDeferal);
for (i = 0x100; i <= 0x150; i += 4)
readl (ioaddr + i);
@@ -1034,9 +1075,10 @@ get_stats (struct net_device *dev)
readw (ioaddr + TCPCheckSumErrors);
readw (ioaddr + UDPCheckSumErrors);
readw (ioaddr + IPCheckSumErrors);
return &np->stats;
return 0;
}
int
change_mtu (struct net_device *dev, int new_mtu)
{
@@ -1111,51 +1153,35 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
int phy_addr;
struct netdev_private *np = dev->priv;
struct mii_data *miidata = (struct mii_data *) &rq->ifr_data;
#ifdef RIO_DEBUG
struct ioctl_data *iodata = (struct ioctl_data *) (rq->ifr_data);
#endif
u16 *data = (u16 *) & rq->ifr_data;
struct netdev_desc *desc;
int i;
phy_addr = np->phy_addr;
switch (cmd) {
case SIOCDEVPRIVATE:
#ifdef RIO_DEBUG
if (rio_ioctl_ext (dev, iodata) != 0)
return -EOPNOTSUPP;
break;
#else
return -EOPNOTSUPP;
#endif
case SIOCDEVPRIVATE + 1:
miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
break;
case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
break;
case SIOCDEVPRIVATE + 3:
np->rx_debug = (data[0] <= 7) ? data[0] : 0;
printk ("rx_debug = %d\n", np->rx_debug);
break;
case SIOCDEVPRIVATE + 4:
np->tx_debug = (data[0] <= 7) ? data[0] : 0;
printk ("tx_debug = %d\n", np->tx_debug);
break;
case SIOCDEVPRIVATE + 5:
np->tx_full = 1;
netif_stop_queue (dev);
break;
case SIOCDEVPRIVATE + 6:
np->tx_full = 0;
netif_wake_queue (dev);
break;
case SIOCDEVPRIVATE + 7:
printk
("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
np->tx_full, np->cur_tx, np->old_tx, np->cur_rx,
netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
np->old_rx);
break;
case SIOCDEVPRIVATE + 8:
@@ -1163,7 +1189,8 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
i,
(u32) (np->tx_ring_dma + i * sizeof (*desc)),
(u32) desc->next_desc,
(u32) desc->status, (u32) (desc->fraginfo >> 32),
@@ -1172,110 +1199,17 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
}
printk ("\n");
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
#ifdef RIO_DEBUG
int
rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata)
{
struct netdev_private *np = dev->priv;
int phy_addr = np->phy_addr;
u32 hi, lo;
int i;
BMCR_t bmcr;
BMSR_t bmsr;
if (iodata == NULL)
goto invalid_cmd;
if (strcmp (iodata->signature, "rio") != 0)
goto invalid_cmd;
switch (iodata->cmd) {
case 0:
for (i = 0; i < TX_RING_SIZE; i++) {
hi = np->tx_ring[i].status >> 32;
lo = np->tx_ring[i].status;
printk ("TFC=%08x %08x \n", hi, lo);
}
break;
case 1:
for (i = 0; i < RX_RING_SIZE; i++) {
hi = np->rx_ring[i].status >> 32;
lo = np->rx_ring[i].status;
printk ("RFS=%08x %08x \n", hi, lo);
}
break;
case 2:
break;
case 3:
if (iodata->data != NULL)
np->tx_debug = iodata->data[0];
break;
case 4:
/* Soft reset PHY */
mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
bmcr.image = 0;
bmcr.bits.an_enable = 1;
bmcr.bits.reset = 1;
mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
break;
case 5:
mii_write (dev, phy_addr, MII_BMCR, 0x1940);
mdelay (10);
mii_write (dev, phy_addr, MII_BMCR, 0x1940);
mdelay (100); /* wait a certain time */
break;
case 6:
/* 5) Set media and Power Up */
bmcr.image = 0;
bmcr.bits.power_down = 1;
if (np->an_enable) {
bmcr.bits.an_enable = 1;
} else {
if (np->speed == 100) {
bmcr.bits.speed100 = 1;
bmcr.bits.speed1000 = 0;
printk ("Manual 100 Mbps, ");
} else if (np->speed == 10) {
bmcr.bits.speed100 = 0;
bmcr.bits.speed1000 = 0;
printk ("Manual 10 Mbps, ");
}
if (np->full_duplex) {
bmcr.bits.duplex_mode = 1;
printk ("Full duplex. \n");
} else {
bmcr.bits.duplex_mode = 0;
printk ("Half duplex.\n");
}
}
mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
break;
case 7:
bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
printk ("BMCR=%x BMSR=%x LinkUp=%d\n",
bmcr.image, bmsr.image, bmsr.bits.link_status);
break;
default:
return -EOPNOTSUPP;
}
return 0;
invalid_cmd:
return -1;
}
#endif
#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
static int __devinit
int
read_eeprom (long ioaddr, int eep_addr)
{
int i = 1000;
@@ -1441,10 +1375,11 @@ mii_get_media (struct net_device *dev)
printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
}
if (negotiate.bits.pause) {
np->tx_flow = 1;
np->rx_flow = 1;
np->tx_flow &= 1;
np->rx_flow &= 1;
} else if (negotiate.bits.asymmetric) {
np->rx_flow = 1;
np->tx_flow = 0;
np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
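/* Note: tx_flow/rx_flow are 1-bit fields, so "&= 1" is a no-op that
 * documents the intent: honor the user's tx_flow/rx_flow module
 * parameters rather than forcing flow control on, as the old "= 1"
 * assignments did. */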
} else {
@@ -1593,10 +1528,11 @@ mii_get_media_pcs (struct net_device *dev)
np->full_duplex = 0;
}
if (negotiate.bits.pause) {
np->tx_flow = 1;
np->rx_flow = 1;
np->tx_flow &= 1;
np->rx_flow &= 1;
} else if (negotiate.bits.asymmetric) {
np->rx_flow = 1;
np->tx_flow = 0;
np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
} else {
dl2k.h
@@ -119,13 +119,13 @@ enum dl2x_offsets {
McstOctetRcvOk = 0xac,
BcstOctetRcvOk = 0xb0,
FramesRcvOk = 0xb4,
McstFramesRcvOk = 0xb8,
BcstFramesRcvOk = 0xbe,
McstFramesRcvdOk = 0xb8,
BcstFramesRcvdOk = 0xbe,
MacControlFramesRcvd = 0xc6,
FrameTooLongErrors = 0xc8,
InRangeLengthErrors = 0xca,
FrameCheckSeqError = 0xcc,
FrameLostRxError = 0xce,
FramesCheckSeqErrors = 0xcc,
FramesLostRxErrors = 0xce,
OctetXmtOk = 0xd0,
McstOctetXmtOk = 0xd4,
BcstOctetXmtOk = 0xd8,
@@ -264,6 +264,7 @@ enum RFS_bits {
FrameEnd = 0x40000000,
RFDDone = 0x80000000,
TCIShift = 32,
RFS_Errors = 0x003f0000,
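/* Note: 0x003f0000 masks the per-frame Rx error bits (bits 16-21),
 * which receive_packet() now tests by name instead of with the bare
 * magic numbers it used before. */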
};
#define MII_RESET_TIME_OUT 10000
@@ -648,7 +649,7 @@ struct netdev_private {
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
struct pci_dev *pdev;
spinlock_t lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
struct net_device_stats stats;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
@@ -657,7 +658,6 @@ struct netdev_private {
unsigned int chip_id; /* PCI table chip id */
unsigned int rx_coalesce; /* Maximum frames each RxDMAComplete intr */
unsigned int rx_timeout; /* Wait time between RxDMAComplete intr */
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int an_enable:2; /* Auto-Negotiation Enable */
unsigned int jumbo:1; /* Jumbo frame enable */
@@ -665,6 +665,7 @@ struct netdev_private {
unsigned int tx_flow:1; /* Tx flow control enable */
unsigned int rx_flow:1; /* Rx flow control enable */
unsigned int phy_media:1; /* 1: fiber, 0: copper */
unsigned int link_status:1; /* Current link status */
unsigned char pci_rev_id; /* PCI revision ID */
struct netdev_desc *last_tx; /* Last Tx descriptor used. */
unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
@@ -677,8 +678,6 @@ struct netdev_private {
u16 advertising; /* NWay media advertisement */
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
int tx_debug;
int rx_debug;
};
/* The station address location in the EEPROM. */
@@ -707,114 +706,4 @@ MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
#define DEFAULT_RXT 750
#define DEFAULT_TXC 1
#define MAX_TXC 8
#ifdef RIO_DEBUG
#define DEBUG_TFD_DUMP(x) debug_tfd_dump(x)
#define DEBUG_RFD_DUMP(x,flag) debug_rfd_dump(x,flag)
#define DEBUG_PKT_DUMP(x,len) debug_pkt_dump(x,len)
#define DEBUG_PRINT printk
static inline void
debug_tfd_dump (struct netdev_private *np)
{
int i;
struct netdev_desc *desc;
if (np->tx_debug == 6) {
printk ("TFDone Dump: ");
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
if ((desc->fraginfo & 0xffffffffff) == 0)
printk ("X");
else
printk ("%d", (desc->status & TFDDone) ? 1 : 0);
}
printk ("\n");
}
if (np->tx_debug == 5) {
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
(u32) np->tx_ring_dma + i * sizeof (*desc),
(u32) desc->next_desc, (u32) desc->status,
(u32) (desc->fraginfo >> 32),
(u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
}
}
static inline void
debug_rfd_dump (struct netdev_private *np, int flag)
{
int i;
struct netdev_desc *desc;
int entry = np->cur_rx % RX_RING_SIZE;
if (np->rx_debug == 5) {
for (i = 0; i < RX_RING_SIZE; i++) {
desc = &np->rx_ring[i];
printk
("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
(u32) np->rx_ring_dma + i * sizeof (*desc),
(u32) desc->next_desc, (u32) desc->status,
(u32) (desc->fraginfo >> 32),
(u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
}
if (np->rx_debug == 6) {
if (flag == 1)
printk ("RFDone Dump: ");
else if (flag == 2)
printk ("Re-Filling: ");
for (i = 0; i < RX_RING_SIZE; i++) {
desc = &np->rx_ring[i];
printk ("%d", (desc->status & RFDDone) ? 1 : 0);
}
printk ("\n");
}
if (np->rx_debug == 7) {
printk (" In rcv_pkt(), entry %d status %4.4x %4.4x.\n",
entry, (u32) (np->rx_ring[entry].status >> 32),
(u32) np->rx_ring[entry].status);
}
}
static inline void
debug_pkt_dump (struct netdev_private *np, int pkt_len)
{
int entry = np->cur_rx % RX_RING_SIZE;
struct netdev_desc *desc = &np->rx_ring[entry];
u64 frame_status = le64_to_cpu (desc->status);
unsigned char *pchar;
unsigned char *phead;
int i;
if (np->rx_debug == 4) {
printk (" rcv_pkt: status was %4.4x %4.4x.\n",
(u32) (frame_status >> 32), (u32) frame_status);
}
if (np->rx_debug == 7) {
#error Please convert me to Documentation/DMA-mapping.txt
phead =
bus_to_virt (le64_to_cpu (desc->fraginfo & 0xffffffffff));
for (pchar = phead, i = 0; i < pkt_len; i++, pchar++) {
printk ("%02x ", *pchar);
if ((i + 1) % 20 == 0)
printk ("\n");
}
}
}
#else
#define DEBUG_TFD_DUMP(x) {}
#define DEBUG_RFD_DUMP(x,flag) {}
#define DEBUG_PKT_DUMP(x,len) {}
#define DEBUG_PRINT() {}
#endif
#endif /* __DL2K_H__ */