Commit 54d96dd1 authored by Jeff Garzik

Merge DaveM's cleanup of Broadcom's GPL'd 4401 net driver

parent d0051184
......@@ -1066,6 +1066,20 @@ config APRICOT
<file:Documentation/networking/net-modules.txt>. The module will be
called apricot.o.
config B44
tristate "Broadcom 4400 ethernet support (EXPERIMENTAL)"
depends on NET_PCI && PCI && EXPERIMENTAL
help
If you have a network (Ethernet) controller of this type, say Y and
read the Ethernet-HOWTO, available from
<http://www.linuxdoc.org/docs.html#howto>.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/modules.txt> as well as
<file:Documentation/networking/net-modules.txt>. The module will be
called b44.o.
config CS89x0
tristate "CS89x0 support"
depends on NET_PCI && ISA
......
......@@ -101,6 +101,7 @@ obj-$(CONFIG_ES3210) += es3210.o 8390.o
obj-$(CONFIG_LNE390) += lne390.o 8390.o
obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
......
/* b44.c: Broadcom 4400 device driver.
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "b44.h"
#define DRV_MODULE_NAME "b44"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.1"
#define DRV_MODULE_RELDATE "Nov 6, 2002"
#define B44_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
NETIF_MSG_TIMER | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
*/
#define B44_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU 60
#define B44_MAX_MTU 1500
#define B44_RX_RING_SIZE 512
#define B44_DEF_RX_RING_PENDING 200
#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE 512
#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
B44_TX_RING_SIZE)
#define TX_RING_GAP(BP) \
(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP) \
(((BP)->tx_cons <= (BP)->tx_prod) ? \
(BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
(BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
/* XXX check this */
#define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
MODULE_DESCRIPTION("Broadcom 4400 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(b44_debug, "i");
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
/* XXX put this to pci_ids.h and pci.ids */
#ifndef PCI_DEVICE_ID_BCM4401
#define PCI_DEVICE_ID_BCM4401 0x4401
#endif
/* PCI IDs this driver binds to.  The table must end with an
 * all-zero sentinel entry: the PCI core walks it until it finds
 * an entry whose vendor field is zero.
 */
static struct pci_device_id b44_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static int b44_init_hw(struct b44 *);
/* Poll register 'reg' until bit 'bit' reaches the requested state
 * (cleared when 'clear' is non-zero, set otherwise), giving up after
 * 'timeout' iterations of 10us each and logging an error on timeout.
 */
static void b44_wait_bit(struct b44 *bp, unsigned long reg,
			 u32 bit, unsigned long timeout, int clear)
{
	unsigned long spins = 0;

	while (spins < timeout) {
		u32 val = br32(reg);
		int done = clear ? !(val & bit) : !!(val & bit);

		if (done)
			break;
		udelay(10);
		spins++;
	}
	if (spins == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
	}
}
/* Sonics SiliconBackplane support routines. ROFL, you should see all the
* buzz words used on this company's website :-)
*
* All of these routines must be invoked with bp->lock held and
* interrupts disabled.
*/
#define SBID_SDRAM 0
#define SBID_PCI_MEM 1
#define SBID_PCI_CFG 2
#define SBID_PCI_DMA 3
#define SBID_SDRAM_SWAPPED 4
#define SBID_ENUM 5
#define SBID_REG_SDRAM 6
#define SBID_REG_ILINE20 7
#define SBID_REG_EMAC 8
#define SBID_REG_CODEC 9
#define SBID_REG_USB 10
#define SBID_REG_PCI 11
#define SBID_REG_MIPS 12
#define SBID_REG_EXTIF 13
#define SBID_EXTIF 14
#define SBID_EJTAG 15
#define SBID_MAX 16
/* Return the fixed Sonics backplane base address for core 'id'.
 * Only the cores this driver touches are mapped; anything else
 * decodes to 0.  'bp' and 'instance' are currently unused.
 */
static u32 ssb_get_addr(struct b44 *bp, u32 id, u32 instance)
{
	if (id == SBID_PCI_DMA)
		return 0x40000000;
	if (id == SBID_ENUM || id == SBID_REG_EMAC)
		return 0x18000000;
	if (id == SBID_REG_CODEC)
		return 0x18001000;
	if (id == SBID_REG_PCI)
		return 0x18002000;
	return 0;
}
/* Return the revision-code field of the currently mapped core's
 * SBIDHIGH register.  br32() is the register accessor from b44.h
 * (implicitly uses bp).
 */
static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}
/* Route backplane interrupts from the given cores to PCI and turn
 * on prefetch/burst in the SSB-to-PCI bridge.  Temporarily slides
 * the BAR0 window over the bridge core's registers and restores it
 * before returning.  Returns the core revision read while the
 * window was repointed.  Caller holds bp->lock.
 */
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	/* Save the current BAR0 window, then point it at the PCI
	 * bridge core so its registers are addressable.
	 */
	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
			       ssb_get_addr(bp, SBID_REG_PCI, 0));

	pci_rev = ssb_get_core_rev(bp);

	/* Enable interrupt forwarding for the requested cores. */
	val = br32(B44_SBINTVEC);
	val |= cores;
	bw32(B44_SBINTVEC, val);

	/* Enable prefetch and burst on PCI transactions. */
	val = br32(SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(SSB_PCI_TRANS_2, val);

	/* Restore the original BAR0 window. */
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
/* Put the currently mapped SSB core into reset.  The reject/busy
 * handshake, the dummy readbacks (which flush posted writes) and
 * the udelay()s follow the Sonics backplane disable sequence; the
 * statement order is hardware-critical — do not reorder.
 * Caller holds bp->lock with interrupts disabled.
 */
static void ssb_core_disable(struct b44 *bp)
{
	if (br32(B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;		/* already held in reset */

	/* Ask the core to reject new backplane transactions... */
	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	/* ...and wait for in-flight transactions to drain. */
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(B44_SBTMSLOW);	/* flush posted write */
	udelay(1);
	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(B44_SBTMSLOW);
	udelay(1);
}
/* Reset the currently mapped SSB core and leave it running with
 * its clock enabled.  Each write is flushed with a readback and a
 * 1us settle; the step order is hardware-critical — do not reorder.
 * Caller holds bp->lock with interrupts disabled.
 */
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);

	/* Assert reset with the clock forced on. */
	bw32(B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround. */
	if (br32(B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(B44_SBTMSHIGH, 0);

	/* Clear any latched inband-error / timeout state. */
	val = br32(B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	/* Deassert reset (still forcing the clock)... */
	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(B44_SBTMSLOW);
	udelay(1);

	/* ...then release the forced clock gate. */
	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(B44_SBTMSLOW);
	udelay(1);
}
/* Return the SSB core unit number for this device.
 *
 * Only single-unit (unit 0) parts are supported for now; the
 * address-match decode needed to distinguish multiple units is not
 * implemented.  (A previous #if 0 sketch of that decode referenced
 * an undeclared variable and could never compile, so it has been
 * removed.)
 */
static int ssb_core_unit(struct b44 *bp)
{
	return 0;
}
static int ssb_is_core_up(struct b44 *bp)
{
return ((br32(B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
== SBTMSLOW_CLOCK);
}
/* Write one 6-byte MAC address into CAM slot 'index' and busy-wait
 * for the write to complete.  data[] is the address in wire order.
 * Caller holds bp->lock.
 */
static void __b44_cam_write(struct b44 *bp, char *data, int index)
{
	u32 val;

	/* Low data word carries the last four address bytes... */
	val = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(B44_CAM_DATA_LO, val);
	/* ...high word the first two, plus the entry-valid bit. */
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(B44_CAM_DATA_HI, val);
	bw32(B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
/* Mask all interrupt sources.  The write is posted; callers that
 * need it to have reached the chip must read back B44_IMASK
 * themselves (see b44_disable_ints()).  Caller holds bp->lock.
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(B44_IMASK, 0);
}
/* Mask all interrupts and flush the posted write so the mask is
 * guaranteed to be in effect when this returns.
 */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(B44_IMASK);
}
/* Re-enable the interrupt sources selected in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(B44_IMASK, bp->imask);
}
/* Read MII register 'reg' of the PHY at bp->phy_addr into *val.
 * Builds an MDIO read frame, then busy-waits on the MII-done
 * status bit.  Always returns 0: a wait timeout is logged inside
 * b44_wait_bit() but not propagated to the caller.
 * Caller holds bp->lock.
 */
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	/* Ack any stale MII-done status before starting. */
	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(B44_MDIO_DATA) & MDIO_DATA_DATA;

	return 0;
}
/* Write 'val' to MII register 'reg' of the PHY at bp->phy_addr.
 * Mirrors b44_readphy(): issues an MDIO write frame and busy-waits
 * on the MII-done status bit.  Always returns 0 (timeouts are
 * logged, not propagated).  Caller holds bp->lock.
 */
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	/* Ack any stale MII-done status before starting. */
	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);

	return 0;
}
/* Reset the PHY via BMCR and verify the self-clearing reset bit
 * has dropped.  Returns 0 on success, a PHY access error, or
 * -ENODEV if the reset never completed.
 */
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	udelay(100);

	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		/* BMCR_RESET is self-clearing; still set means the
		 * PHY is wedged.
		 */
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	/* Bug fix: this used to 'return 0' unconditionally, which
	 * silently discarded the -ENODEV / read-error result.
	 */
	return err;
}
#if 0
/* Placeholder for power-management support; not implemented.
 * NOTE(review): the empty body returns no value, so this must be
 * finished before the #if 0 is removed.
 */
static int b44_set_power_state(struct b44 *bp, int state)
{
}
#endif
/* Program the RX and TX pause configuration given in pause_flags
 * (B44_FLAG_RX_PAUSE / B44_FLAG_TX_PAUSE) into the chip, and record
 * the new state in bp->flags.  Caller holds bp->lock.
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	/* Honor received pause frames? */
	val = br32(B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(B44_RXCONFIG, val);

	/* Generate pause frames?  0xc0 is the RX-FIFO high-water
	 * mark at which pause frames are emitted.
	 */
	val = br32(B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(B44_MAC_FLOW, val);
}
/* Resolve 802.3x pause capability from our advertisement ('local')
 * and the link partner's ('remote'), then program the result.
 * Caller holds bp->lock.
 */
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
				      B44_FLAG_RX_PAUSE);
	int local_cap   = (local & ADVERTISE_PAUSE_CAP) != 0;
	int local_asym  = (local & ADVERTISE_PAUSE_ASYM) != 0;
	int remote_cap  = (remote & LPA_PAUSE_CAP) != 0;
	int remote_asym = (remote & LPA_PAUSE_ASYM) != 0;

	if (local_cap && remote_cap) {
		/* Both ends support symmetric pause. */
		pause_enab |= (B44_FLAG_TX_PAUSE |
			       B44_FLAG_RX_PAUSE);
	} else if (local_cap && local_asym && remote_asym) {
		/* We may receive pause frames only. */
		pause_enab |= B44_FLAG_RX_PAUSE;
	} else if (!local_cap && local_asym && remote_cap && remote_asym) {
		/* We may send pause frames only. */
		pause_enab |= B44_FLAG_TX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
/* Configure the PHY: set up its LEDs, then either start
 * autonegotiation with an advertisement derived from bp->flags, or
 * force speed/duplex when B44_FLAG_FORCE_LINK is set.  Returns 0
 * on success or the first failing PHY access's error code.
 * Caller holds bp->lock.
 */
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	/* LED setup: mask the activity LEDs, enable the traffic LED. */
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		/* Autoneg: translate driver flags into the MII
		 * advertisement register and restart negotiation.
		 */
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		/* Forced link: program speed/duplex directly in BMCR. */
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
/* Accumulate the chip's clear-on-read MIB counters into
 * bp->hw_stats.  A u32 cursor is walked through the stats struct
 * in lock-step with the TX and then RX counter register ranges —
 * this relies on the b44_hw_stats fields being declared in exactly
 * the same order as the hardware registers (assumed from b44.h —
 * verify there before adding/reordering fields).
 * Caller holds bp->lock.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(reg);
	}
	val = &bp->hw_stats.rx_good_octets;
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(reg);
	}
}
/* Log the current link state: down, or up with the negotiated
 * speed, duplex, and pause configuration from bp->flags.
 */
static void b44_link_report(struct b44 *bp)
{
	struct net_device *dev = bp->dev;

	if (!netif_carrier_ok(dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", dev->name);
		return;
	}

	printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
	       dev->name,
	       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
	       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
	printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
	       "%s for RX.\n",
	       dev->name,
	       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
	       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
}
/* Poll the PHY for link changes.  Tracks speed/duplex from the
 * auxiliary control register, updates the MAC duplex setting and
 * flow control on a link-up transition, and reports link state
 * changes and PHY fault conditions.  Caller holds bp->lock.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	/* bmsr == 0xffff means the PHY did not respond at all. */
	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			/* Link came up: sync the MAC's duplex setting
			 * with what the PHY negotiated.
			 */
			u32 val = br32(B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(B44_TX_CTRL, val);

			/* Resolve pause from the negotiated abilities
			 * (autoneg only; forced links keep pause off).
			 */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
/* Once-a-second housekeeping timer: poll the PHY for link changes
 * and harvest the MIB counters, then re-arm for one second from now.
 * __opaque is the struct b44 * stored in timer.data by b44_open().
 */
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	/* Re-arm ourselves. */
	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}
/* Reclaim completed TX descriptors: read the chip's current
 * consumer position, unmap and free every transmitted skb up to
 * it, and wake the queue once enough ring space is free again.
 * Called with bp->lock held (from b44_poll()).
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	/* Convert the chip's byte offset into a descriptor index. */
	cur = br32(B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		/* Every in-window slot must hold a pending skb. */
		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(B44_GPTIMER, 0);
}
/* Allocate and DMA-map a fresh receive buffer for ring slot
 * 'dest_idx_unmasked' (masked to the ring size) and fill in the
 * matching descriptor.  When src_idx >= 0 the source slot's skb
 * bookkeeping is cleared, because its buffer has been handed up
 * the stack (see b44_rx()).  Returns the mapped buffer size on
 * success or -ENOMEM.  Caller holds bp->lock.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = bp->dev;
	/* Map the whole buffer; the chip writes its rx_header into
	 * the first bp->rx_offset bytes, in front of the frame data.
	 */
	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
	skb_reserve(skb, bp->rx_offset);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* mark end-of-table on last slot */

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	return RX_PKT_BUF_SZ;
}
/* Move the (unconsumed) buffer in rx slot src_idx to the producer
 * slot 'dest_idx_unmasked' so it can be reused without a fresh
 * allocation.  Caller holds bp->lock.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	/* EOT belongs to the slot, not the buffer: set it only on the
	 * last ring entry, and clear it otherwise.  Previously a
	 * ctrl word recycled from the last slot into a non-last slot
	 * kept its stale EOT bit, truncating the ring for the chip.
	 */
	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;
}
/* Receive up to 'budget' packets.  Frames at or below
 * RX_COPY_THRESHOLD are copied into a small skb and the DMA buffer
 * is recycled; larger frames are handed up directly and the ring
 * slot is refilled with a fresh buffer.  Returns the number of
 * packets delivered.  Called with bp->lock held (from b44_poll()).
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	/* Convert the chip's descriptor byte offset into an index. */
	prod = br32(B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single(bp->pdev, map,
				    RX_PKT_BUF_SZ,
				    PCI_DMA_FROMDEVICE);
		/* The chip DMAs a status header in front of the frame. */
		rh = (struct rx_header *) (skb->data - bp->rx_offset);
		/* NOTE(review): semantically this is le16_to_cpu()
		 * (same byte swap, misleadingly named direction).
		 */
		len = cpu_to_le16(rh->len);
		if (rh->flags & cpu_to_le16(RX_FLAG_ERRORS)) {
		drop_it:
			/* Reuse this buffer at the producer slot. */
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;

			/* Pass the big buffer up; refill the slot. */
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy out, recycle the buffer. */
			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);
			/* DMA sync done above */
			memcpy(copy_skb->data, skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;

	return received;
}
/* NAPI poll callback.  Services whatever bp->istat latched in the
 * interrupt handler: TX completions, RX up to the netdev quota,
 * and a full chip restart on error interrupts.  When all work is
 * done, leaves polling mode and re-enables chip interrupts.
 * Returns 0 when done, 1 to be polled again.
 */
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev->priv;
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		/* Per-device quota may be tighter than the global one. */
		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		/* Budget exhausted: more RX work remains, poll again. */
		if (work_done >= orig_budget)
			done = 0;
	}
	if (bp->istat & ISTAT_ERRORS) {
		/* Error interrupt: full restart of the chip. */
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	spin_unlock_irq(&bp->lock);

	return (done ? 0 : 1);
}
/* Hard interrupt handler.  Latches the active status bits into
 * bp->istat, masks further interrupts, and schedules b44_poll()
 * to do the real work.  The actual ack is the ISTAT write-back
 * plus flush at the bottom.
 */
static void b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = dev->priv;
	unsigned long flags;
	u32 istat, imask;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(B44_ISTAT);
	imask = br32(B44_IMASK);

	/* The mask register apparently only gates delivery, not what
	 * shows up in ISTAT, so filter the status by hand.
	 */
	istat &= imask;
	if (istat) {
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 * the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		/* Ack the interrupt and flush the posted writes. */
		bw32(B44_ISTAT, istat);
		br32(B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
/* dev->tx_timeout handler: no TX completion for B44_TX_TIMEOUT.
 * Log it and fully restart the chip (pending ring skbs are
 * dropped by b44_init_rings()), then wake the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);

	spin_unlock_irq(&bp->lock);

	netif_wake_queue(dev);
}
/* hard_start_xmit: queue one skb.  Maps the buffer, fills in the
 * next TX descriptor, and advances the chip's TX pointer (with the
 * double-write / readback workarounds some chip revisions need).
 * Returns 0 on success, 1 if the ring was unexpectedly full.
 */
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = bp->tx_prod;
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	/* Single-fragment frame: start-of-frame and end-of-frame in
	 * one descriptor, interrupt on completion.
	 */
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor must be fully written before the doorbell. */
	wmb();

	bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	/* Chip-bug workarounds: some revs need the doorbell written
	 * twice, others need a readback to defeat write reordering.
	 */
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
/* dev->change_mtu: validate the new MTU and apply it.  If the
 * interface is running, the chip is quiesced and fully
 * re-initialized so the new frame size takes effect.
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = dev->priv;

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (netif_running(dev)) {
		/* Live interface: halt, apply, rebuild rings, re-init. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		dev->mtu = new_mtu;
		b44_init_rings(bp);
		b44_init_hw(bp);
		spin_unlock_irq(&bp->lock);
	} else {
		/* Device is down; the new MTU is picked up at open time. */
		dev->mtu = new_mtu;
	}

	return 0;
}
/* Free up pending packets in all rx/tx rings.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
* end up in the driver. bp->lock is not held and we are not
* in an interrupt context and thus may sleep.
*/
/* Unmap and free every pending skb in the rx and tx rings,
 * leaving the ring bookkeeping empty.  See the header comment
 * above: chip halted, no lock held, may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		/* NOTE(review): the buffer was mapped with length
		 * RX_PKT_BUF_SZ in b44_alloc_rx_skb(); unmapping with
		 * the rx_offset subtracted looks inconsistent — verify.
		 */
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ - bp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}
/* Initialize tx/rx rings for packet processing.
*
* The chip has been shut down and the driver detached from
* the networking, so no interrupts or new tx packets will
* end up in the driver. bp->lock is not held and we are not
* in an interrupt context and thus may sleep.
*/
/* Reset both descriptor rings to a clean state and pre-fill the
 * rx ring with bp->rx_pending buffers.  Allocation failures stop
 * the refill early (a shorter rx ring, not an error).
 * See the header comment above: chip halted, no lock held, may sleep.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
/*
* Must not be invoked with interrupt sources disabled and
* the hardware shutdown down.
*/
/* Release the ring bookkeeping arrays and the DMA-coherent
 * descriptor tables.  Safe to call with any subset of them
 * allocated (used as the unwind path of b44_alloc_consistent()).
 */
static void b44_free_consistent(struct b44 *bp)
{
	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
}
/*
* Must not be invoked with interrupt sources disabled and
* the hardware shutdown down. Can sleep.
*/
/* Allocate the ring bookkeeping arrays (kmalloc) and the
 * DMA-coherent rx/tx descriptor tables.  On any failure everything
 * already allocated is released and -ENOMEM is returned.
 * See the header comment above: may sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	memset(bp->rx_buffers, 0, size);

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;
	memset(bp->tx_buffers, 0, size);

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring)
		goto out_err;

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring)
		goto out_err;

	return 0;

out_err:
	/* Unwind whatever succeeded before the failure. */
	b44_free_consistent(bp);
	return -ENOMEM;
}
/* bp->lock is held. */
/* Zero the hardware MIB counters by enabling clear-on-read and
 * reading every counter register once, discarding the values.
 */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(reg);
}
/* bp->lock is held. */
/* Full chip reset.  If the ethernet core is already up, drain and
 * stop its DMA engines first; otherwise set up the PCI bridge to
 * forward this core's interrupts.  After the core reset the MDIO
 * interface is configured and the internal/external PHY selection
 * is made from the DEVCTRL strapping.
 */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(B44_RCV_LAZY, 0);
		bw32(B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, 100, 0);
		bw32(B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		/* Core not up yet: route its backplane interrupt. */
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(B44_MDIO_CTRL);

	/* DEVCTRL_IPP set means the internal PHY is present. */
	if (!(br32(B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(B44_DEVCTRL);

		/* Take the internal PHY out of reset if held there. */
		if (val & DEVCTRL_EPR) {
			bw32(B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
/* bp->lock is held. */
/* Stop the chip: mask interrupts, then reset the core. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
/* bp->lock is held. */
/* Load the device MAC address into CAM slot 0 and enable the CAM,
 * unless the interface is in promiscuous mode (where the CAM stays
 * disabled so all frames are accepted).  Caller holds bp->lock.
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(B44_CAM_CTRL);
		bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
/* dev->set_mac_address: copy the new address into the netdevice
 * and program it into the chip's CAM.  Refused with -EBUSY while
 * the interface is up.
 */
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = dev->priv;
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}
/* Called at device open time to get the chip ready for
* packet processing. Invoked with bp->lock held.
*/
static void __b44_set_rx_mode(struct net_device *);
/* Bring the chip to an operational state: reset chip and PHY,
 * configure the MAC (CRC generation, rx mode/MAC address, frame
 * size limits), point the DMA engines at the rings, and enable the
 * receiver/transmitter.  Always returns 0.  Caller holds bp->lock.
 */
static int b44_init_hw(struct b44 *bp)
{
	u32 val;

	b44_disable_ints(bp);
	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);
	val = br32(B44_MAC_CTRL);
	bw32(B44_MAC_CTRL, val | MAC_CTRL_CRC32_ENAB);
	bw32(B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + 24);
	bw32(B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + 24);

	bw32(B44_TX_WMARK, 56); /* XXX magic */
	bw32(B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	/* RX DMA also strips bp->rx_offset bytes of header room. */
	bw32(B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	/* Tell the chip how many rx descriptors are ready. */
	bw32(B44_DMARX_PTR, bp->rx_pending);

	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	val = br32(B44_ENET_CTRL);
	bw32(B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	return 0;
}
/* dev->open: allocate the DMA rings, grab the (shared) IRQ, bring
 * the chip up, start the once-a-second PHY/stats timer, and enable
 * interrupts.  On failure, everything acquired so far is released
 * in reverse order.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		return err;

	err = request_irq(dev->irq, b44_interrupt,
			  SA_SHIRQ, dev->name, dev);
	if (err) {
		b44_free_consistent(bp);
		return err;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	err = b44_init_hw(bp);
	if (err) {
		/* Hardware init failed: quiesce and drop the rings. */
		b44_halt(bp);
		b44_free_rings(bp);
	} else {
		bp->flags |= B44_FLAG_INIT_COMPLETE;
	}

	spin_unlock_irq(&bp->lock);

	if (err) {
		free_irq(dev->irq, dev);
		b44_free_consistent(bp);
		return err;
	} else {
		/* Start the periodic link/stats poll (see b44_timer()). */
		init_timer(&bp->timer);
		bp->timer.expires = jiffies + HZ;
		bp->timer.data = (unsigned long) bp;
		bp->timer.function = b44_timer;
		add_timer(&bp->timer);
	}

	spin_lock_irq(&bp->lock);
	b44_enable_ints(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}
#if 0
/* Debug helper (currently compiled out): dump PCI status.
 * NOTE(review): the val32* locals are declared but unused.
 */
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);
}
#endif
static int b44_close(struct net_device *dev)
{
struct b44 *bp = dev->priv;
netif_stop_queue(dev);
spin_lock_irq(&bp->lock);
spin_unlock_irq(&bp->lock);
free_irq(dev->irq, dev);
b44_free_consistent(bp);
return 0;
}
/* dev->get_stats: translate the accumulated hardware MIB counters
 * (bp->hw_stats, maintained by b44_stats_update()) into the
 * generic net_device_stats layout.
 */
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes = hwstat->rx_octets;
	nstat->tx_bytes = hwstat->tx_octets;
	nstat->tx_errors = (hwstat->tx_jabber_pkts +
			    hwstat->tx_oversize_pkts +
			    hwstat->tx_underruns +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_late_cols);
	/* NOTE(review): filled from the TX multicast counter —
	 * presumably the RX counter was intended; verify against
	 * the counter layout in b44.h.
	 */
	nstat->multicast = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors = hwstat->rx_align_errs;
	nstat->rx_crc_errors = hwstat->rx_crc_errs;
	nstat->rx_errors = (hwstat->rx_jabber_pkts +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_missed_pkts +
			    hwstat->rx_crc_align_errs +
			    hwstat->rx_undersize +
			    hwstat->rx_crc_errs +
			    hwstat->rx_align_errs +
			    hwstat->rx_symbol_errs);
	nstat->tx_aborted_errors = hwstat->tx_underruns;
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
	return nstat;
}
static void __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
struct dev_mc_list *mclist;
int i, num_ents;
num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
mclist = dev->mc_list;
for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
__b44_cam_write(bp, mclist->dmi_addr, i + 1);
}
}
/* Program the receive filter from dev->flags: promiscuous mode
 * bypasses the CAM entirely; otherwise the MAC address is loaded
 * and either all-multicast is enabled or the multicast list is
 * written into the CAM.  Caller holds bp->lock.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	u32 val;

	val = br32(B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(B44_RXCONFIG, val);
	} else {
		/* Also (re)enables the CAM — see __b44_set_mac_addr(). */
		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			__b44_load_mcast(bp, dev);

		bw32(B44_RXCONFIG, val);
	}
}
/* dev->set_multicast_list entry point: lock-taking wrapper around
 * __b44_set_rx_mode().
 */
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
/* Dispatch SIOCETHTOOL sub-commands.  @useraddr points at a user-space
 * ethtool structure whose first u32 selects the operation.  Returns 0,
 * -EFAULT on copy failures, -EAGAIN when the device is not yet up
 * (B44_FLAG_INIT_COMPLETE clear), -EINVAL on bad parameters, or
 * -EOPNOTSUPP for unknown commands.
 */
static int b44_ethtool_ioctl (struct net_device *dev, void *useraddr)
{
	struct b44 *bp = dev->priv;
	struct pci_dev *pci_dev = bp->pdev;
	u32 ethcmd;

	if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO:{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };

		strcpy (info.driver, DRV_MODULE_NAME);
		strcpy (info.version, DRV_MODULE_VERSION);
		/* No firmware on this part. */
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		strcpy (info.bus_info, pci_dev->slot_name);
		info.eedump_len = 0;
		info.regdump_len = 0;
		if (copy_to_user (useraddr, &info, sizeof (info)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_GSET: {
		struct ethtool_cmd cmd = { ETHTOOL_GSET };

		if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
			return -EAGAIN;
		/* 10/100 MII only; no gigabit modes. */
		cmd.supported = (SUPPORTED_Autoneg);
		cmd.supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);

		cmd.advertising = 0;
		/* NOTE(review): these are the MII ADVERTISE_* register bits,
		 * but ethtool's cmd.advertising field is defined in terms of
		 * the ADVERTISED_* bits, whose encoding differs (e.g.
		 * ADVERTISE_10HALF is 0x0020 while ADVERTISED_10baseT_Half
		 * is 0x0001).  Confirm and convert.
		 */
		if (bp->flags & B44_FLAG_ADV_10HALF)
			cmd.advertising |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			cmd.advertising |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			cmd.advertising |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			cmd.advertising |= ADVERTISE_100FULL;
		cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
			SPEED_100 : SPEED_10;
		cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd.port = 0;
		cmd.phy_address = bp->phy_addr;
		cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
			XCVR_INTERNAL : XCVR_EXTERNAL;
		cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
			AUTONEG_DISABLE : AUTONEG_ENABLE;
		cmd.maxtxpkt = 0;
		cmd.maxrxpkt = 0;
		if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SSET: {
		struct ethtool_cmd cmd;

		if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
			return -EAGAIN;

		if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
			return -EFAULT;

		/* We do not support gigabit. */
		if (cmd.autoneg == AUTONEG_ENABLE) {
			if (cmd.advertising &
			    (ADVERTISED_1000baseT_Half |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		} else if ((cmd.speed != SPEED_100 &&
			    cmd.speed != SPEED_10) ||
			   (cmd.duplex != DUPLEX_HALF &&
			    cmd.duplex != DUPLEX_FULL)) {
			return -EINVAL;
		}

		spin_lock_irq(&bp->lock);

		if (cmd.autoneg == AUTONEG_ENABLE) {
			bp->flags &= ~B44_FLAG_FORCE_LINK;
			bp->flags &= ~(B44_FLAG_ADV_10HALF |
				       B44_FLAG_ADV_10FULL |
				       B44_FLAG_ADV_100HALF |
				       B44_FLAG_ADV_100FULL);
			/* NOTE(review): as in GSET, these tests use MII
			 * ADVERTISE_* bits against an ethtool ADVERTISED_*
			 * bitmask — the gigabit check above uses the
			 * ADVERTISED_* encoding, so these likely should too.
			 */
			if (cmd.advertising & ADVERTISE_10HALF)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd.advertising & ADVERTISE_10FULL)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd.advertising & ADVERTISE_100HALF)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd.advertising & ADVERTISE_100FULL)
				bp->flags |= B44_FLAG_ADV_100FULL;
		} else {
			bp->flags |= B44_FLAG_FORCE_LINK;
			if (cmd.speed == SPEED_100)
				bp->flags |= B44_FLAG_100_BASE_T;
			if (cmd.duplex == DUPLEX_FULL)
				bp->flags |= B44_FLAG_FULL_DUPLEX;
		}

		/* Push the new link configuration down to the PHY. */
		b44_setup_phy(bp);

		spin_unlock_irq(&bp->lock);

		return 0;
	}
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = { ETHTOOL_GMSGLVL };
		edata.data = bp->msg_enable;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		bp->msg_enable = edata.data;
		return 0;
	}
	case ETHTOOL_NWAY_RST: {
		u32 bmcr;
		int r;

		spin_lock_irq(&bp->lock);
		/* NOTE(review): BMCR is read twice; this looks like the
		 * latched-status flush idiom, but BMCR is not a latched
		 * register — the second read may simply be redundant.
		 */
		b44_readphy(bp, MII_BMCR, &bmcr);
		b44_readphy(bp, MII_BMCR, &bmcr);
		r = -EINVAL;
		/* Restarting autoneg only makes sense if it is enabled. */
		if (bmcr & BMCR_ANENABLE) {
			b44_writephy(bp, MII_BMCR,
				     bmcr | BMCR_ANRESTART);
			r = 0;
		}
		spin_unlock_irq(&bp->lock);

		return r;
	}
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = { ETHTOOL_GLINK };
		edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_GRINGPARAM: {
		struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };

		ering.rx_max_pending = B44_RX_RING_SIZE - 1;
		ering.rx_pending = bp->rx_pending;

		/* XXX ethtool lacks a tx_max_pending, oops... */

		if (copy_to_user(useraddr, &ering, sizeof(ering)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SRINGPARAM: {
		struct ethtool_ringparam ering;

		if (copy_from_user(&ering, useraddr, sizeof(ering)))
			return -EFAULT;

		/* One descriptor in each ring is reserved, hence SIZE - 1. */
		if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
		    (ering.rx_mini_pending != 0) ||
		    (ering.rx_jumbo_pending != 0) ||
		    (ering.tx_pending > B44_TX_RING_SIZE - 1))
			return -EINVAL;

		spin_lock_irq(&bp->lock);

		bp->rx_pending = ering.rx_pending;
		bp->tx_pending = ering.tx_pending;

		/* Resizing requires a full stop/re-init of the chip. */
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);

		return 0;
	}
	case ETHTOOL_GPAUSEPARAM: {
		struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };

		epause.autoneg =
			(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
		epause.rx_pause =
			(bp->flags & B44_FLAG_RX_PAUSE) != 0;
		epause.tx_pause =
			(bp->flags & B44_FLAG_TX_PAUSE) != 0;
		if (copy_to_user(useraddr, &epause, sizeof(epause)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SPAUSEPARAM: {
		struct ethtool_pauseparam epause;

		if (copy_from_user(&epause, useraddr, sizeof(epause)))
			return -EFAULT;

		spin_lock_irq(&bp->lock);
		if (epause.autoneg)
			bp->flags |= B44_FLAG_PAUSE_AUTO;
		else
			bp->flags &= ~B44_FLAG_PAUSE_AUTO;
		if (epause.rx_pause)
			bp->flags |= B44_FLAG_RX_PAUSE;
		else
			bp->flags &= ~B44_FLAG_RX_PAUSE;
		if (epause.tx_pause)
			bp->flags |= B44_FLAG_TX_PAUSE;
		else
			bp->flags &= ~B44_FLAG_TX_PAUSE;
		/* Auto-negotiated pause requires a chip restart so the new
		 * advertisement takes effect; forced pause can be written
		 * straight into the MAC.
		 */
		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
			b44_halt(bp);
			b44_init_rings(bp);
			b44_init_hw(bp);
		} else {
			__b44_set_flow_ctrl(bp, bp->flags);
		}
		spin_unlock_irq(&bp->lock);

		return 0;
	}
	};

	return -EOPNOTSUPP;
}
/* dev->do_ioctl hook: route ethtool and MII register access requests.
 * MII reads need no privilege; MII writes require CAP_NET_ADMIN.
 * All PHY access is serialized on bp->lock.
 */
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data;
	struct b44 *bp = dev->priv;
	int ret = -EOPNOTSUPP;

	switch (cmd) {
	case SIOCETHTOOL:
		ret = b44_ethtool_ioctl(dev, (void *) ifr->ifr_data);
		break;

	case SIOCGMIIPHY:
		mii->phy_id = bp->phy_addr;
		/* fallthru */
	case SIOCGMIIREG: {
		u32 val;

		spin_lock_irq(&bp->lock);
		ret = b44_readphy(bp, mii->reg_num & 0x1f, &val);
		spin_unlock_irq(&bp->lock);

		/* val_out is filled in even when the read failed, matching
		 * the register value left behind by b44_readphy(). */
		mii->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_irq(&bp->lock);
		ret = b44_writephy(bp, mii->reg_num & 0x1f, mii->val_in);
		spin_unlock_irq(&bp->lock);
		break;

	default:
		break;
	}

	return ret;
}
/* Read 128-bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	u16 *dst = (u16 *) data;
	long word;

	/* The SPROM is memory-mapped at offset 4096 from the register
	 * base; copy it out one 16-bit word at a time. */
	for (word = 0; word < 64; word++)
		dst[word] = readw(bp->regs + 4096 + (word * 2));

	return 0;
}
/* Pull the fixed, per-board configuration out of the SPROM: MAC
 * address, PHY address, MDC port, and the SSB/DMA constants.  Called
 * once at probe time.  Returns 0 or the b44_read_eeprom() error.
 */
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	/* The SPROM stores the MAC address with the two bytes of each
	 * 16-bit word swapped, hence the pairwise reversed indices. */
	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	bp->phy_addr = eeprom[90] & 0x1f;
	/* NOTE(review): eeprom[90] is a u8, so (eeprom[90] >> 14) is always
	 * zero after integer promotion — mdc_port can never become 1.  The
	 * bit presumably lives in the 16-bit SPROM word containing this
	 * byte; confirm against the SPROM layout and read the full word. */
	bp->mdc_port = (eeprom[90] >> 14) & 0x1;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = ssb_get_addr(bp, SBID_PCI_DMA, 0);

	/* Assume the revision needs the TX pointer workaround. */
	bp->flags |= B44_FLAG_BUGGY_TXPTR;

out:
	return err;
}
/* PCI probe: enable the device, map BAR0, allocate and register the
 * net_device, and read the board invariants.  On any failure the goto
 * chain unwinds exactly the resources acquired so far.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	/* Print the driver banner only for the first device found. */
	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* The chip's registers live in a memory BAR (BAR 0). */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* 32-bit DMA only; the descriptor 'addr' field is a u32. */
	err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = dev->priv;
	bp->pdev = pdev;
	bp->dev = dev;
	if (b44_debug > 0)
		bp->msg_enable = b44_debug;
	else
		bp->msg_enable = B44_DEF_MSG_ENABLE;

	spin_lock_init(&bp->lock);

	bp->regs = (unsigned long) ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	/* Wire up the netdevice method table. */
	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;

	/* Needs bp->regs mapped: reads the SPROM and SSB state. */
	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Snapshot PCI config space for later power-management restore. */
	pci_save_state(bp->pdev, bp->pci_cfg_state);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap((void *) bp->regs);

err_out_free_dev:
	/* NOTE(review): the net_device came from alloc_etherdev();
	 * plain kfree() matches the API of this kernel generation —
	 * confirm free_netdev() is not yet available here. */
	kfree(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/* PCI remove: tear down everything b44_init_one() set up, in reverse
 * order.  A NULL drvdata means probe never completed, so there is
 * nothing to undo.
 */
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp;

	if (!dev)
		return;

	bp = dev->priv;

	unregister_netdev(dev);
	iounmap((void *) bp->regs);
	kfree(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* PCI driver glue.  Power management entry points exist but are
 * compiled out (#if 0) until suspend/resume support is finished. */
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
#if 0
	.suspend	= b44_suspend,
	.resume		= b44_resume
#endif
};
/* Module load: register the PCI driver; probing happens per device
 * via b44_init_one(). */
static int __init b44_init(void)
{
	return pci_module_init(&b44_driver);
}
/* Module unload: unregistering the driver triggers b44_remove_one()
 * for every bound device. */
static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);
#ifndef _B44_H
#define _B44_H

/* Register layout. */
#define B44_DEVCTRL	0x0000UL /* Device Control */
#define  DEVCTRL_PFE		0x00000080 /* Pattern Filtering Enable */
#define  DEVCTRL_IPP		0x00000400 /* Internal EPHY Present */
#define  DEVCTRL_EPR		0x00008000 /* EPHY Reset */
#define  DEVCTRL_PME		0x00001000 /* PHY Mode Enable */
#define  DEVCTRL_PMCE		0x00002000 /* PHY Mode Clocks Enable */
#define  DEVCTRL_PADDR		0x0007c000 /* PHY Address */
/* NOTE(review): DEVCTRL_PADDR's lowest set bit is bit 14, so a shift
 * of 18 does not line up with the mask — confirm against the
 * datasheet before using this shift to extract the field. */
#define  DEVCTRL_PADDR_SHIFT	18
#define B44_BIST_STAT	0x000CUL /* Built-In Self-Test Status */
/* Wakeup pattern lengths: one 7-bit length plus a disable bit per
 * pattern, packed one byte per pattern. */
#define B44_WKUP_LEN	0x0010UL /* Wakeup Length */
#define  WKUP_LEN_P0_MASK	0x0000007f /* Pattern 0 */
#define  WKUP_LEN_D0		0x00000080
#define  WKUP_LEN_P1_MASK	0x00007f00 /* Pattern 1 */
#define  WKUP_LEN_P1_SHIFT	8
#define  WKUP_LEN_D1		0x00008000
#define  WKUP_LEN_P2_MASK	0x007f0000 /* Pattern 2 */
#define  WKUP_LEN_P2_SHIFT	16
/* BUG FIX: each disable bit is the top bit of its pattern's byte
 * (D0=bit 7, D1=bit 15, D3=bit 31), but D2 was defined as 0x00000000,
 * a flag that can never be set or tested.  Bit 23 follows the pattern. */
#define  WKUP_LEN_D2		0x00800000
#define  WKUP_LEN_P3_MASK	0x7f000000 /* Pattern 3 */
#define  WKUP_LEN_P3_SHIFT	24
#define  WKUP_LEN_D3		0x80000000
#define B44_ISTAT 0x0020UL /* Interrupt Status */
#define ISTAT_PME 0x00000040 /* Power Management Event */
#define ISTAT_TO 0x00000080 /* General Purpose Timeout */
#define ISTAT_DSCE 0x00000400 /* Descriptor Error */
#define ISTAT_DATAE 0x00000800 /* Data Error */
#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */
#define ISTAT_RDU 0x00002000 /* Receive Descr. Underflow */
#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */
#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */
#define ISTAT_RX 0x00010000 /* RX Interrupt */
#define ISTAT_TX 0x01000000 /* TX Interrupt */
#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */
#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */
#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */
#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
#define B44_IMASK 0x0024UL /* Interrupt Mask */
#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
#define B44_GPTIMER 0x0028UL /* General Purpose Timer */
#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */
#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */
#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */
#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */
#define B44_MAC_CTRL 0x00A8UL /* MAC Control */
#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */
#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */
#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */
#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */
#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5
#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */
#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */
#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */
#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */
#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */
#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */
#define RCV_LAZY_FC_SHIFT 24
#define B44_DMATX_CTRL	0x0200UL /* DMA TX Control */
#define  DMATX_CTRL_ENABLE	0x00000001 /* Enable */
#define  DMATX_CTRL_SUSPEND	0x00000002 /* Suspend Request */
#define  DMATX_CTRL_LPBACK	0x00000004 /* Loopback Enable */
#define  DMATX_CTRL_FAIRPRIOR	0x00000008 /* Fair Priority */
#define  DMATX_CTRL_FLUSH	0x00000010 /* Flush Request */
#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */
#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */
#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */
#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */
#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */
#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */
#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */
#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */
#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */
#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */
#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */
#define DMATX_STAT_ENONE 0x00000000 /* Error None */
#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */
#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */
#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */
#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */
#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */
#define B44_DMARX_CTRL	0x0210UL /* DMA RX Control */
#define  DMARX_CTRL_ENABLE	0x00000001 /* Enable */
#define  DMARX_CTRL_ROMASK	0x000000fe /* Receive Offset Mask */
#define  DMARX_CTRL_ROSHIFT	1 /* Receive Offset Shift */
#define B44_DMARX_ADDR	0x0214UL /* DMA RX Descriptor Ring Address */
#define B44_DMARX_PTR	0x0218UL /* DMA RX Last Posted Descriptor */
#define B44_DMARX_STAT	0x021CUL /* DMA RX Current Active Desc. + Status */
#define  DMARX_STAT_CDMASK	0x00000fff /* Current Descriptor Mask */
#define  DMARX_STAT_SMASK	0x0000f000 /* State Mask */
#define  DMARX_STAT_SDISABLED	0x00000000 /* State Disabled */
#define  DMARX_STAT_SACTIVE	0x00001000 /* State Active */
#define  DMARX_STAT_SIDLE	0x00002000 /* State Idle Wait */
#define  DMARX_STAT_SSTOPPED	0x00003000 /* State Stopped */
#define  DMARX_STAT_EMASK	0x000f0000 /* Error Mask */
#define  DMARX_STAT_ENONE	0x00000000 /* Error None */
#define  DMARX_STAT_EDPE	0x00010000 /* Error Desc. Protocol Error */
#define  DMARX_STAT_EDFO	0x00020000 /* Error Data FIFO Overflow */
#define  DMARX_STAT_EBEBW	0x00030000 /* Error Bus Error on Buffer Write */
#define  DMARX_STAT_EBEDA	0x00040000 /* Error Bus Error on Desc. Access */
/* DMA FIFO diagnostic window: select a FIFO with the 4-bit select
 * field, an offset within it with the low 16 bits. */
#define B44_DMAFIFO_AD	0x0220UL /* DMA FIFO Diag Address */
#define  DMAFIFO_AD_OMASK	0x0000ffff /* Offset Mask */
#define  DMAFIFO_AD_SMASK	0x000f0000 /* Select Mask */
#define  DMAFIFO_AD_SXDD	0x00000000 /* Select Transmit DMA Data */
#define  DMAFIFO_AD_SXDP	0x00010000 /* Select Transmit DMA Pointers */
#define  DMAFIFO_AD_SRDD	0x00040000 /* Select Receive DMA Data */
#define  DMAFIFO_AD_SRDP	0x00050000 /* Select Receive DMA Pointers */
#define  DMAFIFO_AD_SXFD	0x00080000 /* Select Transmit FIFO Data */
#define  DMAFIFO_AD_SXFP	0x00090000 /* Select Transmit FIFO Pointers */
#define  DMAFIFO_AD_SRFD	0x000c0000 /* Select Receive FIFO Data */
/* BUG FIX: SRFP duplicated SRFD's value (0x000c0000); every other
 * Data/Pointers pair differs by +1 in the select field, so the RX
 * FIFO Pointers select is 0x000d0000. */
#define  DMAFIFO_AD_SRFP	0x000d0000 /* Select Receive FIFO Pointers */
#define B44_DMAFIFO_LO	0x0224UL /* DMA FIFO Diag Low Data */
#define B44_DMAFIFO_HI	0x0228UL /* DMA FIFO Diag High Data */
#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */
#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */
#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */
#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */
#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */
#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */
#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */
#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */
#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */
#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */
#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */
/* MDIO (MII management) frame assembly registers.  A clause-22 frame
 * is built in B44_MDIO_DATA: start bits, opcode, PHY address, register
 * address, turnaround, then 16 data bits. */
#define B44_MDIO_CTRL	0x0410UL /* EMAC MDIO Control */
#define  MDIO_CTRL_MAXF_MASK	0x0000007f /* MDC Frequency */
#define  MDIO_CTRL_PREAMBLE	0x00000080 /* MII Preamble Enable */
#define B44_MDIO_DATA	0x0414UL /* EMAC MDIO Data */
#define  MDIO_DATA_DATA		0x0000ffff /* R/W Data */
#define  MDIO_DATA_TA_MASK	0x00030000 /* Turnaround Value */
#define  MDIO_DATA_TA_SHIFT	16
#define  MDIO_TA_VALID		2
#define  MDIO_DATA_RA_MASK	0x007c0000 /* Register Address */
#define  MDIO_DATA_RA_SHIFT	18
#define  MDIO_DATA_PMD_MASK	0x0f800000 /* Physical Media Device */
#define  MDIO_DATA_PMD_SHIFT	23
#define  MDIO_DATA_OP_MASK	0x30000000 /* Opcode */
#define  MDIO_DATA_OP_SHIFT	28
#define  MDIO_OP_WRITE		1
#define  MDIO_OP_READ		2
#define  MDIO_DATA_SB_MASK	0xc0000000 /* Start Bits */
#define  MDIO_DATA_SB_SHIFT	30
/* BUG FIX: the start-of-frame pattern must lie inside the 2-bit SB
 * field (bits 30-31); the clause-22 ST value is 01, i.e.
 * 1 << MDIO_DATA_SB_SHIFT = 0x40000000.  The old 0x10000000 fell
 * outside SB_MASK entirely. */
#define  MDIO_DATA_SB_START	0x40000000 /* Start Of Frame */
#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */
#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */
#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */
#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */
#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */
#define B44_CAM_DATA_LO	0x0420UL /* EMAC CAM Data Low */
#define B44_CAM_DATA_HI	0x0424UL /* EMAC CAM Data High */
#define  CAM_DATA_HI_VALID	0x00010000 /* Valid Bit */
#define B44_CAM_CTRL	0x0428UL /* EMAC CAM Control */
#define  CAM_CTRL_ENABLE	0x00000001 /* CAM Enable */
#define  CAM_CTRL_MSEL		0x00000002 /* Mask Select */
#define  CAM_CTRL_READ		0x00000004 /* Read */
#define  CAM_CTRL_WRITE		0x00000008 /* Write */
#define  CAM_CTRL_INDEX_MASK	0x003f0000 /* Index Mask */
#define  CAM_CTRL_INDEX_SHIFT	16
#define  CAM_CTRL_BUSY		0x80000000 /* CAM Busy */
#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */
#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */
#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */
#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */
#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */
#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */
#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */
#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */
#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */
#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */
#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */
#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */
#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */
#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */
#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */
#define B44_TX_O 0x0508UL /* MIB TX Octets */
#define B44_TX_P 0x050CUL /* MIB TX Packets */
#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */
#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */
#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */
#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */
#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */
#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */
#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */
#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */
#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */
#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */
#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */
#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */
#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */
#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */
#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */
#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */
#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */
#define B44_TX_DEFERED 0x0554UL /* MIB TX Defered Packets */
#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */
#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */
#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */
#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */
#define B44_RX_O 0x0588UL /* MIB RX Octets */
#define B44_RX_P 0x058CUL /* MIB RX Packets */
#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */
#define B44_RX_MCAST 0x0594UL /* MIB RX Multicast Packets */
#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */
#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */
#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */
#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */
#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */
#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */
#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */
#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */
#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */
#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */
#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */
#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */
#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */
#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */
#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */
#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */
#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */
#define B44_SBIPSFLAG 0x0F08UL /* SB Initiator Port OCP Slave Flag */
#define SBIPSFLAG_IMASK1 0x0000003f /* Which sbflags --> mips interrupt 1 */
#define SBIPSFLAG_ISHIFT1 0
#define SBIPSFLAG_IMASK2 0x00003f00 /* Which sbflags --> mips interrupt 2 */
#define SBIPSFLAG_ISHIFT2 8
#define SBIPSFLAG_IMASK3 0x003f0000 /* Which sbflags --> mips interrupt 3 */
#define SBIPSFLAG_ISHIFT3 16
#define SBIPSFLAG_IMASK4 0x3f000000 /* Which sbflags --> mips interrupt 4 */
#define SBIPSFLAG_ISHIFT4 24
#define B44_SBTPSFLAG 0x0F18UL /* SB Target Port OCP Slave Flag */
#define SBTPS_NUM0_MASK 0x0000003f
#define SBTPS_F0EN0 0x00000040
#define B44_SBADMATCH3 0x0F60UL /* SB Address Match 3 */
#define B44_SBADMATCH2 0x0F68UL /* SB Address Match 2 */
#define B44_SBADMATCH1 0x0F70UL /* SB Address Match 1 */
#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */
#define SBIMSTATE_PC 0x0000000f /* Pipe Count */
#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */
#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */
#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */
#define SBIMSTATE_IBE 0x00020000 /* In Band Error */
#define SBIMSTATE_TO 0x00040000 /* Timeout */
#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */
#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */
#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */
#define SBTMSLOW_RESET 0x00000001 /* Reset */
#define SBTMSLOW_REJECT 0x00000002 /* Reject */
#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */
#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */
#define SBTMSLOW_BE 0x80000000 /* BIST Enable */
#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */
#define SBTMSHIGH_SERR 0x00000001 /* S-error */
#define SBTMSHIGH_INT 0x00000002 /* Interrupt */
#define SBTMSHIGH_BUSY 0x00000004 /* Busy */
#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */
#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */
#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */
#define B44_SBBWA0 0x0FA0UL /* SB Bandwidth Allocation Table 0 */
#define SBBWA0_TAB0_MASK 0x0000ffff /* Lookup Table 0 */
#define SBBWA0_TAB0_SHIFT 0
#define SBBWA0_TAB1_MASK 0xffff0000 /* Lookup Table 0 */
#define SBBWA0_TAB1_SHIFT 16
#define B44_SBIMCFGLOW 0x0FA8UL /* SB Initiator Configuration Low */
#define SBIMCFGLOW_STO_MASK 0x00000003 /* Service Timeout */
#define SBIMCFGLOW_RTO_MASK 0x00000030 /* Request Timeout */
#define SBIMCFGLOW_RTO_SHIFT 4
#define SBIMCFGLOW_CID_MASK 0x00ff0000 /* Connection ID */
#define SBIMCFGLOW_CID_SHIFT 16
#define B44_SBIMCFGHIGH 0x0FACUL /* SB Initiator Configuration High */
#define SBIMCFGHIGH_IEM_MASK 0x0000000c /* Inband Error Mode */
#define SBIMCFGHIGH_TEM_MASK 0x00000030 /* Timeout Error Mode */
#define SBIMCFGHIGH_TEM_SHIFT 4
#define SBIMCFGHIGH_BEM_MASK 0x000000c0 /* Bus Error Mode */
#define SBIMCFGHIGH_BEM_SHIFT 6
#define B44_SBADMATCH0 0x0FB0UL /* SB Address Match 0 */
#define SBADMATCH0_TYPE_MASK 0x00000003 /* Address Type */
#define SBADMATCH0_AD64 0x00000004 /* Reserved */
#define SBADMATCH0_AI0_MASK 0x000000f8 /* Type0 Size */
#define SBADMATCH0_AI0_SHIFT 3
#define SBADMATCH0_AI1_MASK 0x000001f8 /* Type1 Size */
#define SBADMATCH0_AI1_SHIFT 3
#define SBADMATCH0_AI2_MASK 0x000001f8 /* Type2 Size */
#define SBADMATCH0_AI2_SHIFT 3
#define SBADMATCH0_ADEN 0x00000400 /* Enable */
#define SBADMATCH0_ADNEG 0x00000800 /* Negative Decode */
#define SBADMATCH0_BS0_MASK 0xffffff00 /* Type0 Base Address */
#define SBADMATCH0_BS0_SHIFT 8
#define SBADMATCH0_BS1_MASK 0xfffff000 /* Type1 Base Address */
#define SBADMATCH0_BS1_SHIFT 12
#define SBADMATCH0_BS2_MASK 0xffff0000 /* Type2 Base Address */
#define SBADMATCH0_BS2_SHIFT 16
#define B44_SBTMCFGLOW 0x0FB8UL /* SB Target Configuration Low */
#define SBTMCFGLOW_CD_MASK 0x000000ff /* Clock Divide Mask */
#define SBTMCFGLOW_CO_MASK 0x0000f800 /* Clock Offset Mask */
#define SBTMCFGLOW_CO_SHIFT 11
#define SBTMCFGLOW_IF_MASK 0x00fc0000 /* Interrupt Flags Mask */
#define SBTMCFGLOW_IF_SHIFT 18
#define SBTMCFGLOW_IM_MASK 0x03000000 /* Interrupt Mode Mask */
#define SBTMCFGLOW_IM_SHIFT 24
#define B44_SBTMCFGHIGH 0x0FBCUL /* SB Target Configuration High */
#define SBTMCFGHIGH_BM_MASK 0x00000003 /* Busy Mode */
#define SBTMCFGHIGH_RM_MASK 0x0000000C /* Retry Mode */
#define SBTMCFGHIGH_RM_SHIFT 2
#define SBTMCFGHIGH_SM_MASK 0x00000030 /* Stop Mode */
#define SBTMCFGHIGH_SM_SHIFT 4
#define SBTMCFGHIGH_EM_MASK 0x00000300 /* Error Mode */
#define SBTMCFGHIGH_EM_SHIFT 8
#define SBTMCFGHIGH_IM_MASK 0x00000c00 /* Interrupt Mode */
#define SBTMCFGHIGH_IM_SHIFT 10
#define B44_SBBCFG 0x0FC0UL /* SB Broadcast Configuration */
#define SBBCFG_LAT_MASK 0x00000003 /* SB Latency */
#define SBBCFG_MAX0_MASK 0x000f0000 /* MAX Counter 0 */
#define SBBCFG_MAX0_SHIFT 16
#define SBBCFG_MAX1_MASK 0x00f00000 /* MAX Counter 1 */
#define SBBCFG_MAX1_SHIFT 20
#define B44_SBBSTATE 0x0FC8UL /* SB Broadcast State */
#define SBBSTATE_SRD 0x00000001 /* ST Reg Disable */
#define SBBSTATE_HRD 0x00000002 /* Hold Reg Disable */
#define B44_SBACTCNFG 0x0FD8UL /* SB Activate Configuration */
#define B44_SBFLAGST 0x0FE8UL /* SB Current SBFLAGS */
#define B44_SBIDLOW 0x0FF8UL /* SB Identification Low */
#define SBIDLOW_CS_MASK 0x00000003 /* Config Space Mask */
#define SBIDLOW_AR_MASK 0x00000038 /* Num Address Ranges Supported */
#define SBIDLOW_AR_SHIFT 3
#define SBIDLOW_SYNCH 0x00000040 /* Sync */
#define SBIDLOW_INIT 0x00000080 /* Initiator */
#define SBIDLOW_MINLAT_MASK 0x00000f00 /* Minimum Backplane Latency */
#define SBIDLOW_MINLAT_SHIFT 8
#define SBIDLOW_MAXLAT_MASK 0x0000f000 /* Maximum Backplane Latency */
#define SBIDLOW_MAXLAT_SHIFT 12
#define SBIDLOW_FIRST 0x00010000 /* This Initiator is First */
#define SBIDLOW_CW_MASK 0x000c0000 /* Cycle Counter Width */
#define SBIDLOW_CW_SHIFT 18
#define SBIDLOW_TP_MASK 0x00f00000 /* Target Ports */
#define SBIDLOW_TP_SHIFT 20
#define SBIDLOW_IP_MASK 0x0f000000 /* Initiator Ports */
#define SBIDLOW_IP_SHIFT 24
#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */
#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */
#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */
#define SBIDHIGH_CC_SHIFT 4
#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */
#define SBIDHIGH_VC_SHIFT 16
#define CORE_CODE_ILINE20 0x801
#define CORE_CODE_SDRAM 0x803
#define CORE_CODE_PCI 0x804
#define CORE_CODE_MIPS 0x805
#define CORE_CODE_ENET 0x806
#define CORE_CODE_CODEC 0x807
#define CORE_CODE_USB 0x808
#define CORE_CODE_ILINE100 0x80a
#define CORE_CODE_EXTIF 0x811
/* SSB PCI config space registers. */
#define SSB_BAR0_WIN 0x80
#define SSB_BAR1_WIN 0x84
#define SSB_SPROM_CONTROL 0x88
#define SSB_BAR1_CONTROL 0x8c
/* SSB core and host control registers. */
#define SSB_CONTROL 0x0000UL
#define SSB_ARBCONTROL 0x0010UL
#define SSB_ISTAT 0x0020UL
#define SSB_IMASK 0x0024UL
#define SSB_MBOX 0x0028UL
#define SSB_BCAST_ADDR 0x0050UL
#define SSB_BCAST_DATA 0x0054UL
#define SSB_PCI_TRANS_0 0x0100UL
#define SSB_PCI_TRANS_1 0x0104UL
#define SSB_PCI_TRANS_2 0x0108UL
#define SSB_SPROM 0x0800UL
#define SSB_PCI_MEM 0x00000000
#define SSB_PCI_IO 0x00000001
#define SSB_PCI_CFG0 0x00000002
#define SSB_PCI_CFG1 0x00000003
#define SSB_PCI_PREF 0x00000004
#define SSB_PCI_BURST 0x00000008
#define SSB_PCI_MASK0 0xfc000000
#define SSB_PCI_MASK1 0xfc000000
#define SSB_PCI_MASK2 0xc0000000
/* Register accessors.  NOTE: non-hygienic — both macros implicitly
 * reference a local variable `bp` (struct b44 *) that must be in
 * scope at every call site. */
#define br32(REG)	readl(bp->regs + (REG))
#define bw32(REG,VAL)	writel((VAL), bp->regs + (REG))
/* 4400 PHY registers */
#define B44_MII_AUXCTRL 24 /* Auxiliary Control */
#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */
#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */
#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */
#define B44_MII_ALEDCTRL 26 /* Activity LED */
#define MII_ALEDCTRL_ALLMSK 0x7fff
#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */
#define MII_TLEDCTRL_ENABLE 0x0040
/* XXX Add this to mii.h */
#ifndef ADVERTISE_PAUSE
#define ADVERTISE_PAUSE_CAP 0x0400
#endif
#ifndef ADVERTISE_PAUSE_ASYM
#define ADVERTISE_PAUSE_ASYM 0x0800
#endif
#ifndef LPA_PAUSE
#define LPA_PAUSE_CAP 0x0400
#endif
#ifndef LPA_PAUSE_ASYM
#define LPA_PAUSE_ASYM 0x0800
#endif
/* Hardware DMA descriptor: control bits (DESC_CTRL_* below) plus the
 * 32-bit bus address of the buffer.  Layout is fixed by the chip —
 * do not reorder or pad. */
struct dma_desc {
	u32	ctrl;
	u32	addr;
};
/* There are only 12 bits in the DMA engine for descriptor offsetting
 * so the table must be aligned on a boundary of this.
 */
#define DMA_TABLE_BYTES 4096
#define DESC_CTRL_LEN 0x00001fff
#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */
#define DESC_CTRL_EOT 0x10000000 /* End of Table */
#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */
#define DESC_CTRL_EOF 0x40000000 /* End of Frame */
#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */
#define RX_COPY_THRESHOLD 256
/* Status header the chip prepends to every received frame: length,
 * RX_FLAG_* status bits, then padding.  Total size is 28 bytes,
 * matching RX_HEADER_LEN. */
struct rx_header {
	u16	len;
	u16	flags;
	u16	pad[12];
};
#define RX_HEADER_LEN 28
#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */
#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */
#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */
#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */
#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */
#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */
#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */
#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */
#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */
#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
/* Per-descriptor software state: the skb attached to the slot and its
 * PCI DMA mapping (field only present when the platform needs it). */
struct ring_info {
	struct sk_buff		*skb;
	DECLARE_PCI_UNMAP_ADDR(mapping);
};
/* Number of entries in the chip's multicast table. */
#define B44_MCAST_TABLE_SIZE	32

/* SW copy of device statistics, kept up to date by periodic timer
 * which probes HW values. Must have same relative layout as HW
 * register above, because b44_stats_update depends upon this.
 *
 * Because the layout mirrors the hardware registers, fields must not
 * be reordered, resized, or renamed (including the "tx_defered"
 * spelling); counters are u32, one per HW register.
 */
struct b44_hw_stats {
	u32	tx_good_octets, tx_good_pkts, tx_octets;
	u32	tx_pkts, tx_broadcast_pkts, tx_multicast_pkts;
	u32	tx_len_64, tx_len_65_to_127, tx_len_128_to_255;
	u32	tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max;
	u32	tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts;
	u32	tx_underruns, tx_total_cols, tx_single_cols;
	u32	tx_multiple_cols, tx_excessive_cols, tx_late_cols;
	u32	tx_defered, tx_carrier_lost, tx_pause_pkts;
	u32	__pad1[8];	/* NOTE(review): presumably reserved HW registers between TX and RX counters */

	u32	rx_good_octets, rx_good_pkts, rx_octets;
	u32	rx_pkts, rx_broadcast_pkts, rx_multicast_pkts;
	u32	rx_len_64, rx_len_65_to_127, rx_len_128_to_255;
	u32	rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max;
	u32	rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts;
	u32	rx_missed_pkts, rx_crc_align_errs, rx_undersize;
	u32	rx_crc_errs, rx_align_errs, rx_symbol_errs;
	u32	rx_pause_pkts, rx_nonpause_pkts;
};
/* Per-adapter driver state, one instance per probed 4400 chip. */
struct b44 {
	spinlock_t		lock;		/* protects the fields below; exact coverage defined by b44.c */

	u32			imask, istat;	/* NOTE(review): names suggest interrupt mask / status shadow -- confirm in b44.c */

	struct dma_desc		*rx_ring, *tx_ring;	/* descriptor rings (DMA_TABLE_BYTES aligned) */

	u32			tx_prod, tx_cons;	/* TX ring producer/consumer indices */
	u32			rx_prod, rx_cons;	/* RX ring producer/consumer indices */

	struct ring_info	*rx_buffers;	/* per-slot skb + mapping for the RX ring */
	struct ring_info	*tx_buffers;	/* per-slot skb + mapping for the TX ring */

	u32			dma_offset;	/* NOTE(review): presumably offset applied to bus addresses given to the chip */
	u32			flags;		/* B44_FLAG_* bits, defined below */
#define B44_FLAG_INIT_COMPLETE	0x00000001
#define B44_FLAG_BUGGY_TXPTR	0x00000002
#define B44_FLAG_REORDER_BUG	0x00000004
#define B44_FLAG_PAUSE_AUTO	0x00008000
#define B44_FLAG_FULL_DUPLEX	0x00010000
#define B44_FLAG_100_BASE_T	0x00020000
#define B44_FLAG_TX_PAUSE	0x00040000
#define B44_FLAG_RX_PAUSE	0x00080000
#define B44_FLAG_FORCE_LINK	0x00100000
#define B44_FLAG_ADV_10HALF	0x01000000
#define B44_FLAG_ADV_10FULL	0x02000000
#define B44_FLAG_ADV_100HALF	0x04000000
#define B44_FLAG_ADV_100FULL	0x08000000
#define B44_FLAG_INTERNAL_PHY	0x10000000

	u32			rx_offset;	/* NOTE(review): looks related to RX_HEADER_LEN placement -- verify in b44.c */

	u32			msg_enable;	/* NETIF_MSG_* log verbosity mask */

	struct timer_list	timer;		/* periodic timer (see b44_hw_stats comment: probes HW counters) */

	struct net_device_stats	stats;		/* generic netdev statistics */
	struct b44_hw_stats	hw_stats;	/* SW copy of the chip's counters */

	unsigned long		regs;		/* base of the chip's memory-mapped registers */
	struct pci_dev		*pdev;		/* owning PCI device */
	struct net_device	*dev;		/* associated network interface */

	dma_addr_t		rx_ring_dma, tx_ring_dma;	/* bus addresses of the rings */

	u32			rx_pending;	/* requested RX ring size */
	u32			tx_pending;	/* requested TX ring size */

	u32			pci_cfg_state[64 / sizeof(u32)];	/* 64 bytes of saved PCI config space -- presumably for suspend/resume */
	u8			phy_addr;	/* MII PHY address */
	u8			mdc_port;
	u8			core_unit;
};
#endif /* _B44_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment