Commit 0e396ee4 authored by Linus Torvalds

Manual merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git

This is a fixed-up version of the broken "upstream-2.6.13" branch, where
I re-did the manual merge of drivers/net/r8169.c by hand, and made sure
the history is all good.
parents b8112df7 2089a0d3
Generic HDLC layer
Krzysztof Halasa <khc@pm.waw.pl>
January, 2003
Generic HDLC layer currently supports:
-- Frame Relay (ANSI, CCITT and no LMI), with ARP support (no InARP).
-  Normal (routed) and Ethernet-bridged (Ethernet device emulation)
+1. Frame Relay (ANSI, CCITT, Cisco and no LMI).
+   - Normal (routed) and Ethernet-bridged (Ethernet device emulation)
interfaces can share a single PVC.
-- raw HDLC - either IP (IPv4) interface or Ethernet device emulation.
-- Cisco HDLC,
-- PPP (uses syncppp.c),
-- X.25 (uses X.25 routines).
+   - ARP support (no InARP support in the kernel - there is an
+     experimental InARP user-space daemon available on:
+     http://www.kernel.org/pub/linux/utils/net/hdlc/).
+2. raw HDLC - either IP (IPv4) interface or Ethernet device emulation.
+3. Cisco HDLC.
+4. PPP (uses syncppp.c).
+5. X.25 (uses X.25 routines).
There are hardware drivers for the following cards:
- C101 by Moxa Technologies Co., Ltd.
- RISCom/N2 by SDL Communications Inc.
- and others, some not in the official kernel.
Generic HDLC is a protocol driver only - it needs a low-level driver
for your particular hardware.
Ethernet device emulation (using HDLC or Frame-Relay PVC) is compatible
with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging).
......@@ -24,7 +24,7 @@ with IEEE 802.1Q (VLANs) and 802.1D (Ethernet bridging).
Make sure the hdlc.o and the hardware driver are loaded. It should
create a number of "hdlc" (hdlc0 etc) network devices, one for each
WAN port. You'll need the "sethdlc" utility, get it from:
-http://hq.pm.waw.pl/hdlc/
+http://www.kernel.org/pub/linux/utils/net/hdlc/
Compile sethdlc.c utility:
gcc -O2 -Wall -o sethdlc sethdlc.c
......@@ -52,12 +52,12 @@ Setting interface:
* v35 | rs232 | x21 | t1 | e1 - sets physical interface for a given port
if the card has software-selectable interfaces
loopback - activate hardware loopback (for testing only)
-* clock ext - external clock (uses DTE RX and TX clock)
-* clock int - internal clock (provides clock signal on DCE clock output)
-* clock txint - TX internal, RX external (provides TX clock on DCE output)
-* clock txfromrx - TX clock derived from RX clock (TX clock on DCE output)
-* rate - sets clock rate in bps (not required for external clock or
-  for txfromrx)
+* clock ext - both RX clock and TX clock external
+* clock int - both RX clock and TX clock internal
+* clock txint - RX clock external, TX clock internal
+* clock txfromrx - RX clock external, TX clock derived from RX clock
+* rate - sets clock rate in bps (for "int" or "txint" clock only)
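For illustration, two clock configurations using the settings listed above (the
device name hdlc0 and the 128000 bps rate are only example values):

sethdlc hdlc0 clock int rate 128000
sethdlc hdlc0 clock ext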
Setting protocol:
......@@ -79,7 +79,7 @@ Setting protocol:
* x25 - sets X.25 mode
* fr - Frame Relay mode
-lmi ansi / ccitt / none - LMI (link management) type
+lmi ansi / ccitt / cisco / none - LMI (link management) type
dce - Frame Relay DCE (network) side LMI instead of default DTE (user).
It has nothing to do with clocks!
t391 - link integrity verification polling timer (in seconds) - user
......@@ -119,13 +119,14 @@ or
-If you have a problem with N2 or C101 card, you can issue the "private"
-command to see port's packet descriptor rings (in kernel logs):
+If you have a problem with N2, C101 or PLX200SYN card, you can issue the
+"private" command to see port's packet descriptor rings (in kernel logs):
sethdlc hdlc0 private
-The hardware driver has to be build with CONFIG_HDLC_DEBUG_RINGS.
+The hardware driver has to be build with #define DEBUG_RINGS.
Attaching this info to bug reports would be helpful. Anyway, let me know
if you have problems using this.
-For patches and other info look at http://hq.pm.waw.pl/hdlc/
+For patches and other info look at:
+<http://www.kernel.org/pub/linux/utils/net/hdlc/>.
......@@ -47,7 +47,6 @@ ni52 <------------------ Buggy ------------------>
ni65 YES YES YES Software(#)
seeq NO NO NO N/A
sgiseek <------------------ Buggy ------------------>
sk_g16 NO NO YES N/A
smc-ultra YES YES YES Hardware
sunlance YES YES YES Hardware
tulip YES YES YES Hardware
......
......@@ -284,9 +284,6 @@ ppp.c:
seeq8005.c: *Not modularized*
(Probes ports: 0x300, 0x320, 0x340, 0x360)
sk_g16.c: *Not modularized*
(Probes ports: 0x100, 0x180, 0x208, 0x220, 0x288, 0x320, 0x328, 0x390)
skeleton.c: *Skeleton*
slhc.c:
......
......@@ -54,6 +54,7 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
......@@ -91,16 +92,17 @@ KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
static int debug = -1;
MODULE_PARM (debug, "i");
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
MODULE_PARM (multicast_filter_limit, "i");
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
#define PFX DRV_NAME ": "
......@@ -186,6 +188,9 @@ enum {
RingEnd = (1 << 30), /* End of descriptor ring */
FirstFrag = (1 << 29), /* First segment of a packet */
LastFrag = (1 << 28), /* Final segment of a packet */
LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
MSSShift = 16, /* MSS value position */
MSSMask = 0xfff, /* MSS value: 11 bits */
TxError = (1 << 23), /* Tx error summary */
RxError = (1 << 20), /* Rx error summary */
IPCS = (1 << 18), /* Calculate IP checksum */
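The new LargeSend/MSSShift/MSSMask bits are consumed further down in
cp_start_xmit(). As a condensed illustration, reusing the bit names from this
enum (so it is meant to be read against the driver, not compiled standalone),
the descriptor's opts1 word for a single-fragment frame ends up being built
roughly like this:

/* Sketch only: how this patch composes opts1.  With TSO the MSS is
 * folded into bits 16..27; otherwise the per-protocol checksum bits
 * are set instead. */
static u32 cp_build_opts1(u32 eor, u32 len, unsigned int mss, int is_tcp)
{
	u32 flags = eor | len | DescOwn | FirstFrag | LastFrag;

	if (mss)		/* hardware segments the frame for us */
		flags |= LargeSend | ((mss & MSSMask) << MSSShift);
	else if (is_tcp)	/* plain checksum offload path */
		flags |= IPCS | TCPCS;

	return flags;
}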
......@@ -312,7 +317,7 @@ struct cp_desc {
struct ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
unsigned frag;
u32 len;
};
struct cp_dma_stats {
......@@ -394,6 +399,9 @@ struct cp_private {
static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static struct pci_device_id cp_pci_tbl[] = {
{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
......@@ -688,6 +696,19 @@ cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void cp_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
cp_interrupt(dev->irq, dev, NULL);
enable_irq(dev->irq);
}
#endif
static void cp_tx (struct cp_private *cp)
{
unsigned tx_head = cp->tx_head;
......@@ -707,7 +728,7 @@ static void cp_tx (struct cp_private *cp)
BUG();
pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
skb->len, PCI_DMA_TODEVICE);
cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
......@@ -749,10 +770,11 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned entry;
u32 eor;
u32 eor, flags;
#if CP_VLAN_TAG_USED
u32 vlan_tag = 0;
#endif
int mss = 0;
spin_lock_irq(&cp->lock);
......@@ -772,6 +794,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
entry = cp->tx_head;
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
if (dev->features & NETIF_F_TSO)
mss = skb_shinfo(skb)->tso_size;
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
u32 len;
......@@ -783,26 +808,26 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
txd->addr = cpu_to_le64(mapping);
wmb();
if (skb->ip_summed == CHECKSUM_HW) {
flags = eor | len | DescOwn | FirstFrag | LastFrag;
if (mss)
flags |= LargeSend | ((mss & MSSMask) << MSSShift);
else if (skb->ip_summed == CHECKSUM_HW) {
const struct iphdr *ip = skb->nh.iph;
if (ip->protocol == IPPROTO_TCP)
txd->opts1 = cpu_to_le32(eor | len | DescOwn |
FirstFrag | LastFrag |
IPCS | TCPCS);
flags |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
txd->opts1 = cpu_to_le32(eor | len | DescOwn |
FirstFrag | LastFrag |
IPCS | UDPCS);
flags |= IPCS | UDPCS;
else
BUG();
} else
txd->opts1 = cpu_to_le32(eor | len | DescOwn |
FirstFrag | LastFrag);
WARN_ON(1); /* we need a WARN() */
}
txd->opts1 = cpu_to_le32(flags);
wmb();
cp->tx_skb[entry].skb = skb;
cp->tx_skb[entry].mapping = mapping;
cp->tx_skb[entry].frag = 0;
cp->tx_skb[entry].len = len;
entry = NEXT_TX(entry);
} else {
struct cp_desc *txd;
......@@ -820,7 +845,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
first_len, PCI_DMA_TODEVICE);
cp->tx_skb[entry].skb = skb;
cp->tx_skb[entry].mapping = first_mapping;
cp->tx_skb[entry].frag = 1;
cp->tx_skb[entry].len = first_len;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
......@@ -836,16 +861,19 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
len, PCI_DMA_TODEVICE);
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
if (skb->ip_summed == CHECKSUM_HW) {
ctrl = eor | len | DescOwn | IPCS;
ctrl = eor | len | DescOwn;
if (mss)
ctrl |= LargeSend |
((mss & MSSMask) << MSSShift);
else if (skb->ip_summed == CHECKSUM_HW) {
if (ip->protocol == IPPROTO_TCP)
ctrl |= TCPCS;
ctrl |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
ctrl |= UDPCS;
ctrl |= IPCS | UDPCS;
else
BUG();
} else
ctrl = eor | len | DescOwn;
}
if (frag == skb_shinfo(skb)->nr_frags - 1)
ctrl |= LastFrag;
......@@ -860,7 +888,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
cp->tx_skb[entry].skb = skb;
cp->tx_skb[entry].mapping = mapping;
cp->tx_skb[entry].frag = frag + 2;
cp->tx_skb[entry].len = len;
entry = NEXT_TX(entry);
}
......@@ -1074,7 +1102,6 @@ static int cp_refill_rx (struct cp_private *cp)
cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
cp->rx_skb[i].skb = skb;
cp->rx_skb[i].frag = 0;
cp->rx_ring[i].opts2 = 0;
cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
......@@ -1126,9 +1153,6 @@ static void cp_clean_rings (struct cp_private *cp)
{
unsigned i;
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
for (i = 0; i < CP_RX_RING_SIZE; i++) {
if (cp->rx_skb[i].skb) {
pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
......@@ -1140,13 +1164,18 @@ static void cp_clean_rings (struct cp_private *cp)
for (i = 0; i < CP_TX_RING_SIZE; i++) {
if (cp->tx_skb[i].skb) {
struct sk_buff *skb = cp->tx_skb[i].skb;
pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
skb->len, PCI_DMA_TODEVICE);
cp->tx_skb[i].len, PCI_DMA_TODEVICE);
if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
dev_kfree_skb(skb);
cp->net_stats.tx_dropped++;
}
}
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
}
......@@ -1538,6 +1567,8 @@ static struct ethtool_ops cp_ethtool_ops = {
.set_tx_csum = ethtool_op_set_tx_csum, /* local! */
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = cp_get_regs,
.get_wol = cp_get_wol,
.set_wol = cp_set_wol,
......@@ -1749,6 +1780,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->get_stats = cp_get_stats;
dev->do_ioctl = cp_ioctl;
dev->poll = cp_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = cp_poll_controller;
#endif
dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. */
#ifdef BROKEN
dev->change_mtu = cp_change_mtu;
......@@ -1768,6 +1802,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
#if 0 /* disabled by default until verified */
dev->features |= NETIF_F_TSO;
#endif
dev->irq = pdev->irq;
rc = register_netdev(dev);
......
......@@ -824,6 +824,18 @@ config SMC9194
<file:Documentation/networking/net-modules.txt>. The module
will be called smc9194.
config DM9000
tristate "DM9000 support"
depends on ARM && NET_ETHERNET
select CRC32
select MII
---help---
Support for DM9000 chipset.
To compile this driver as a module, choose M here and read
<file:Documentation/networking/net-modules.txt>. The module will be
called dm9000.
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on NET_ETHERNET && ISA
......@@ -989,21 +1001,6 @@ config EEXPRESS_PRO
<file:Documentation/networking/net-modules.txt>. The module
will be called eepro.
config FMV18X
tristate "FMV-181/182/183/184 support (OBSOLETE)"
depends on NET_ISA && OBSOLETE
---help---
If you have a Fujitsu FMV-181/182/183/184 network (Ethernet) card,
say Y and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
If you use an FMV-183 or FMV-184 and it is not working, you may need
to disable Plug & Play mode of the card.
To compile this driver as a module, choose M here and read
<file:Documentation/networking/net-modules.txt>. The module
will be called fmv18x.
config HPLAN_PLUS
tristate "HP PCLAN+ (27247B and 27252A) support"
depends on NET_ISA
......@@ -1092,14 +1089,6 @@ config SEEQ8005
<file:Documentation/networking/net-modules.txt>. The module
will be called seeq8005.
config SK_G16
tristate "SK_G16 support (OBSOLETE)"
depends on NET_ISA && OBSOLETE
help
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
config SKMC
tristate "SKnet MCA support"
depends on NET_ETHERNET && MCA && BROKEN
......@@ -1932,6 +1921,18 @@ config R8169_VLAN
If in doubt, say Y.
config SKGE
tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL
select CRC32
---help---
This driver supports the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
and related Gigabit Ethernet adapters. It is a new, smaller driver
with better performance and more complete ethtool support.
It does not support the link failover and network management
features that the "portable" vendor-supplied sk98lin driver does.
config SK98LIN
tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
depends on PCI
......
......@@ -53,6 +53,7 @@ obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_TC35815) += tc35815.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SK98LIN) += sk98lin/
obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
......@@ -74,7 +75,6 @@ obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
obj-$(CONFIG_SHAPER) += shaper.o
obj-$(CONFIG_SK_G16) += sk_g16.o
obj-$(CONFIG_HP100) += hp100.o
obj-$(CONFIG_SMC9194) += smc9194.o
obj-$(CONFIG_FEC) += fec.o
......@@ -122,7 +122,6 @@ obj-$(CONFIG_DEFXX) += defxx.o
obj-$(CONFIG_SGISEEQ) += sgiseeq.o
obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o
obj-$(CONFIG_AT1700) += at1700.o
obj-$(CONFIG_FMV18X) += fmv18x.o
obj-$(CONFIG_EL1) += 3c501.o
obj-$(CONFIG_EL16) += 3c507.o
obj-$(CONFIG_ELMC) += 3c523.o
......@@ -180,6 +179,7 @@ obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
obj-$(CONFIG_IBMVETH) += ibmveth.o
obj-$(CONFIG_S2IO) += s2io.o
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
obj-$(CONFIG_ARM) += arm/
......
......@@ -210,9 +210,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_AT1700
{at1700_probe, 0},
#endif
#ifdef CONFIG_FMV18X /* Fujitsu FMV-181/182 */
{fmv18x_probe, 0},
#endif
#ifdef CONFIG_ETH16I
{eth16i_probe, 0}, /* ICL EtherTeam 16i/32 */
#endif
......@@ -243,9 +240,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_ELPLUS /* 3c505 */
{elplus_probe, 0},
#endif
#ifdef CONFIG_SK_G16
{SK_init, 0},
#endif
#ifdef CONFIG_NI5010
{ni5010_probe, 0},
#endif
......
......@@ -68,6 +68,7 @@ struct etherh_priv {
void __iomem *dma_base;
unsigned int id;
void __iomem *ctrl_port;
void __iomem *base;
unsigned char ctrl;
u32 supported;
};
......@@ -177,7 +178,7 @@ etherh_setif(struct net_device *dev)
switch (etherh_priv(dev)->id) {
case PROD_I3_ETHERLAN600:
case PROD_I3_ETHERLAN600A:
addr = (void *)dev->base_addr + EN0_RCNTHI;
addr = etherh_priv(dev)->base + EN0_RCNTHI;
switch (dev->if_port) {
case IF_PORT_10BASE2:
......@@ -218,7 +219,7 @@ etherh_getifstat(struct net_device *dev)
switch (etherh_priv(dev)->id) {
case PROD_I3_ETHERLAN600:
case PROD_I3_ETHERLAN600A:
addr = (void *)dev->base_addr + EN0_RCNTHI;
addr = etherh_priv(dev)->base + EN0_RCNTHI;
switch (dev->if_port) {
case IF_PORT_10BASE2:
stat = 1;
......@@ -281,7 +282,7 @@ static void
etherh_reset(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
void __iomem *addr = (void *)dev->base_addr;
void __iomem *addr = etherh_priv(dev)->base;
writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr);
......@@ -327,7 +328,7 @@ etherh_block_output (struct net_device *dev, int count, const unsigned char *buf
ei_local->dmaing = 1;
addr = (void *)dev->base_addr;
addr = etherh_priv(dev)->base;
dma_base = etherh_priv(dev)->dma_base;
count = (count + 1) & ~1;
......@@ -387,7 +388,7 @@ etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int
ei_local->dmaing = 1;
addr = (void *)dev->base_addr;
addr = etherh_priv(dev)->base;
dma_base = etherh_priv(dev)->dma_base;
buf = skb->data;
......@@ -427,7 +428,7 @@ etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_p
ei_local->dmaing = 1;
addr = (void *)dev->base_addr;
addr = etherh_priv(dev)->base;
dma_base = etherh_priv(dev)->dma_base;
writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD);
......@@ -696,7 +697,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
eh->ctrl_port = eh->ioc_fast;
}
dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset;
eh->base = eh->memc + data->ns8390_offset;
dev->base_addr = (unsigned long)eh->base;
eh->dma_base = eh->memc + data->dataport_offset;
eh->ctrl_port += data->ctrlport_offset;
......
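The etherh changes above replace repeated "(void *)dev->base_addr" casts with a
typed "void __iomem *base" kept in the driver's private data. A minimal sketch
of that pattern (the struct and helper below are illustrative only; E8390_CMD
comes from the 8390 header the driver already uses):

#include <linux/netdevice.h>
#include <asm/io.h>

/* Keep the mapped register window as a typed __iomem pointer in the
 * private struct and access it with readb()/writeb(), instead of
 * round-tripping through the integer dev->base_addr. */
struct example_priv {
	void __iomem *base;		/* 8390 register window */
	void __iomem *dma_base;		/* data port */
};

static u8 example_read_cmd(struct net_device *dev)
{
	struct example_priv *ep = netdev_priv(dev);

	return readb(ep->base + E8390_CMD);
}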
......@@ -1681,10 +1681,6 @@ static int au1000_init(struct net_device *dev)
control |= MAC_FULL_DUPLEX;
}
/* fix for startup without cable */
if (!link)
dev->flags &= ~IFF_RUNNING;
aup->mac->control = control;
aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
au_sync();
......@@ -1709,16 +1705,14 @@ static void au1000_timer(unsigned long data)
if_port = dev->if_port;
if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
if (link) {
if (!(dev->flags & IFF_RUNNING)) {
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
dev->flags |= IFF_RUNNING;
printk(KERN_INFO "%s: link up\n", dev->name);
}
}
else {
if (dev->flags & IFF_RUNNING) {
if (netif_carrier_ok(dev)) {
netif_carrier_off(dev);
dev->flags &= ~IFF_RUNNING;
dev->if_port = 0;
printk(KERN_INFO "%s: link down\n", dev->name);
}
......
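The au1000 hunk above is one instance of a conversion that recurs in this merge
(bmac, sk98lin and tlan make the same change): link state is reported through
netif_carrier_on()/netif_carrier_off() and queried with netif_carrier_ok(),
instead of toggling IFF_RUNNING in dev->flags by hand. A minimal sketch of the
resulting link-poll body (link_up() is a hypothetical placeholder for whatever
PHY status query the driver performs):

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Illustrative link poll: report carrier changes through the
 * netif_carrier_* helpers rather than editing dev->flags directly. */
static void example_check_link(struct net_device *dev)
{
	int up = link_up(dev);		/* hypothetical PHY query */

	if (up && !netif_carrier_ok(dev)) {
		netif_carrier_on(dev);
		printk(KERN_INFO "%s: link up\n", dev->name);
	} else if (!up && netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
		printk(KERN_INFO "%s: link down\n", dev->name);
	}
}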
......@@ -1412,7 +1412,6 @@ static int bmac_open(struct net_device *dev)
bp->opened = 1;
bmac_reset_and_enable(dev);
enable_irq(dev->irq);
dev->flags |= IFF_RUNNING;
return 0;
}
......@@ -1425,7 +1424,6 @@ static int bmac_close(struct net_device *dev)
int i;
bp->sleeping = 1;
dev->flags &= ~(IFF_UP | IFF_RUNNING);
/* disable rx and tx */
config = bmread(dev, RXCFG);
......
/*
* dm9000 Ethernet
*/
#ifndef _DM9000X_H_
#define _DM9000X_H_
#define DM9000_ID 0x90000A46
/* although the registers are 16 bit, they are 32-bit aligned.
*/
#define DM9000_NCR 0x00
#define DM9000_NSR 0x01
#define DM9000_TCR 0x02
#define DM9000_TSR1 0x03
#define DM9000_TSR2 0x04
#define DM9000_RCR 0x05
#define DM9000_RSR 0x06
#define DM9000_ROCR 0x07
#define DM9000_BPTR 0x08
#define DM9000_FCTR 0x09
#define DM9000_FCR 0x0A
#define DM9000_EPCR 0x0B
#define DM9000_EPAR 0x0C
#define DM9000_EPDRL 0x0D
#define DM9000_EPDRH 0x0E
#define DM9000_WCR 0x0F
#define DM9000_PAR 0x10
#define DM9000_MAR 0x16
#define DM9000_GPCR 0x1e
#define DM9000_GPR 0x1f
#define DM9000_TRPAL 0x22
#define DM9000_TRPAH 0x23
#define DM9000_RWPAL 0x24
#define DM9000_RWPAH 0x25
#define DM9000_VIDL 0x28
#define DM9000_VIDH 0x29
#define DM9000_PIDL 0x2A
#define DM9000_PIDH 0x2B
#define DM9000_CHIPR 0x2C
#define DM9000_SMCR 0x2F
#define DM9000_MRCMDX 0xF0
#define DM9000_MRCMD 0xF2
#define DM9000_MRRL 0xF4
#define DM9000_MRRH 0xF5
#define DM9000_MWCMDX 0xF6
#define DM9000_MWCMD 0xF8
#define DM9000_MWRL 0xFA
#define DM9000_MWRH 0xFB
#define DM9000_TXPLL 0xFC
#define DM9000_TXPLH 0xFD
#define DM9000_ISR 0xFE
#define DM9000_IMR 0xFF
#define NCR_EXT_PHY (1<<7)
#define NCR_WAKEEN (1<<6)
#define NCR_FCOL (1<<4)
#define NCR_FDX (1<<3)
#define NCR_LBK (3<<1)
#define NCR_RST (1<<0)
#define NSR_SPEED (1<<7)
#define NSR_LINKST (1<<6)
#define NSR_WAKEST (1<<5)
#define NSR_TX2END (1<<3)
#define NSR_TX1END (1<<2)
#define NSR_RXOV (1<<1)
#define TCR_TJDIS (1<<6)
#define TCR_EXCECM (1<<5)
#define TCR_PAD_DIS2 (1<<4)
#define TCR_CRC_DIS2 (1<<3)
#define TCR_PAD_DIS1 (1<<2)
#define TCR_CRC_DIS1 (1<<1)
#define TCR_TXREQ (1<<0)
#define TSR_TJTO (1<<7)
#define TSR_LC (1<<6)
#define TSR_NC (1<<5)
#define TSR_LCOL (1<<4)
#define TSR_COL (1<<3)
#define TSR_EC (1<<2)
#define RCR_WTDIS (1<<6)
#define RCR_DIS_LONG (1<<5)
#define RCR_DIS_CRC (1<<4)
#define RCR_ALL (1<<3)
#define RCR_RUNT (1<<2)
#define RCR_PRMSC (1<<1)
#define RCR_RXEN (1<<0)
#define RSR_RF (1<<7)
#define RSR_MF (1<<6)
#define RSR_LCS (1<<5)
#define RSR_RWTO (1<<4)
#define RSR_PLE (1<<3)
#define RSR_AE (1<<2)
#define RSR_CE (1<<1)
#define RSR_FOE (1<<0)
#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 )
#define FCTR_LWOT(ot) ( ot & 0xf )
#define IMR_PAR (1<<7)
#define IMR_ROOM (1<<3)
#define IMR_ROM (1<<2)
#define IMR_PTM (1<<1)
#define IMR_PRM (1<<0)
#define ISR_ROOS (1<<3)
#define ISR_ROS (1<<2)
#define ISR_PTS (1<<1)
#define ISR_PRS (1<<0)
#define ISR_CLR_STATUS (ISR_ROOS | ISR_ROS | ISR_PTS | ISR_PRS)
#define EPCR_REEP (1<<5)
#define EPCR_WEP (1<<4)
#define EPCR_EPOS (1<<3)
#define EPCR_ERPRR (1<<2)
#define EPCR_ERPRW (1<<1)
#define EPCR_ERRE (1<<0)
#define GPCR_GEP_CNTL (1<<0)
#define DM9000_PKT_RDY 0x01 /* Packet ready to receive */
#define DM9000_PKT_MAX 1536 /* Received packet max size */
#endif /* _DM9000X_H_ */
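For orientation, DM9000_ID above is simply the Davicom vendor ID (0x0A46) and
product ID (0x9000) packed together; a probe routine reassembles it from the
VIDL/VIDH/PIDL/PIDH registers. A rough sketch of that check (dm9000_read_reg()
and struct board_info are stand-ins for the driver's index/data-port accessor
and private data, not names defined by this header):

/* Sketch: read back the chip ID and compare it with DM9000_ID.
 * dm9000_read_reg() is a hypothetical accessor; the real driver goes
 * through its index/data I/O ports. */
static int dm9000_chip_present(struct board_info *db)
{
	u32 id;

	id  = dm9000_read_reg(db, DM9000_VIDL);
	id |= (u32)dm9000_read_reg(db, DM9000_VIDH) << 8;
	id |= (u32)dm9000_read_reg(db, DM9000_PIDL) << 16;
	id |= (u32)dm9000_read_reg(db, DM9000_PIDH) << 24;

	return id == DM9000_ID;			/* 0x90000A46 */
}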
......@@ -1537,20 +1537,20 @@ static void shmem_get_8390_hdr(struct net_device *dev,
static void shmem_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
+ ring_offset
void __iomem *base = ei_status.mem;
unsigned long offset = (TX_PAGES<<8) + ring_offset
- (ei_status.rx_start_page << 8);
char *buf = skb->data;
if (xfer_start + count > (void __iomem *)ei_status.rmem_end) {
if (offset + count > ei_status.priv) {
/* We must wrap the input move. */
int semi_count = (void __iomem *)ei_status.rmem_end - xfer_start;
copyin(buf, xfer_start, semi_count);
int semi_count = ei_status.priv - offset;
copyin(buf, base + offset, semi_count);
buf += semi_count;
xfer_start = ei_status.mem + (TX_PAGES<<8);
offset = TX_PAGES<<8;
count -= semi_count;
}
copyin(buf, xfer_start, count);
copyin(buf, base + offset, count);
}
/*====================================================================*/
......@@ -1611,8 +1611,9 @@ static int setup_shmem_window(dev_link_t *link, int start_pg,
}
ei_status.mem = info->base + offset;
ei_status.priv = req.Size;
dev->mem_start = (u_long)ei_status.mem;
dev->mem_end = ei_status.rmem_end = (u_long)info->base + req.Size;
dev->mem_end = dev->mem_start + req.Size;
ei_status.tx_start_page = start_pg;
ei_status.rx_start_page = start_pg + TX_PAGES;
......
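The shmem_block_input() change above switches from comparing __iomem pointers
against rmem_end to plain offset arithmetic against the window size stored in
ei_status.priv. The wrap handling is easier to follow with concrete numbers; a
self-contained sketch where ordinary memcpy() stands in for the driver's
copyin over the shared-memory window:

#include <linux/string.h>

/* With window_size = 0x4000, offset = 0x3f00 and count = 0x300 the copy
 * is split into 0x100 bytes up to the end of the window and 0x200 bytes
 * restarting at tx_end (TX_PAGES << 8 in the driver). */
static void copy_with_wrap(char *buf, const char *base, unsigned long offset,
			   int count, unsigned long window_size,
			   unsigned long tx_end)
{
	if (offset + count > window_size) {
		int semi_count = window_size - offset;

		memcpy(buf, base + offset, semi_count);
		buf += semi_count;
		offset = tx_end;
		count -= semi_count;
	}
	memcpy(buf, base + offset, count);
}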
......@@ -1217,36 +1217,43 @@ ppp_push(struct ppp *ppp)
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
int nch, len, fragsize;
int len, fragsize;
int i, bits, hdrlen, mtu;
int flen, fnb;
int flen;
int navail, nfree;
int nbigger;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
struct sk_buff *frag;
struct ppp_channel *chan;
nch = 0;
nfree = 0; /* # channels which have no packet already queued */
navail = 0; /* total # of usable channels (not deregistered) */
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
i = 0;
list = &ppp->channels;
while ((list = list->next) != &ppp->channels) {
pch = list_entry(list, struct channel, clist);
nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0);
/*
* If a channel hasn't had a fragment yet, it has to get
* one before we send any fragments on later channels.
* If it can't take a fragment now, don't give any
* to subsequent channels.
*/
if (!pch->had_frag && !pch->avail) {
while ((list = list->next) != &ppp->channels) {
pch = list_entry(list, struct channel, clist);
pch->avail = 0;
}
break;
navail += pch->avail = (pch->chan != NULL);
if (pch->avail) {
if (skb_queue_len(&pch->file.xq) == 0
|| !pch->had_frag) {
pch->avail = 2;
++nfree;
}
if (!pch->had_frag && i < ppp->nxchan)
ppp->nxchan = i;
}
++i;
}
if (nch == 0)
/*
* Don't start sending this packet unless at least half of
* the channels are free. This gives much better TCP
* performance if we have a lot of channels.
*/
if (nfree == 0 || nfree < navail / 2)
return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression (XXX this should be optional) */
......@@ -1257,14 +1264,19 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
--len;
}
/* decide on fragment size */
/*
* Decide on fragment size.
* We create a fragment for each free channel regardless of
* how small they are (i.e. even 0 length) in order to minimize
* the time that it will take to detect when a channel drops
* a fragment.
*/
fragsize = len;
if (nch > 1) {
int maxch = ROUNDUP(len, MIN_FRAG_SIZE);
if (nch > maxch)
nch = maxch;
fragsize = ROUNDUP(fragsize, nch);
}
if (nfree > 1)
fragsize = ROUNDUP(fragsize, nfree);
/* nbigger channels get fragsize bytes, the rest get fragsize-1,
except if nbigger==0, then they all get fragsize. */
nbigger = len % nfree;
/* skip to the channel after the one we last used
and start at that one */
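The fragment-size choice above is easier to see with numbers. A small
self-contained sketch, with ROUNDUP reproduced as the ceiling division used by
ppp_generic.c (the sample values in the comment are arbitrary):

#define ROUNDUP(n, x) (((n) + (x) - 1) / (x))	/* ceiling division */

/* nbigger channels get fragsize bytes, the rest fragsize - 1; the total
 * always adds back up to len.  Example: len = 1502, nfree = 4 gives
 * fragsize = 376, nbigger = 2, i.e. 2*376 + 2*375 = 1502. */
static int mp_split_total(int len, int nfree)
{
	int fragsize = ROUNDUP(len, nfree);
	int nbigger  = len % nfree;

	if (nbigger == 0)		/* everyone gets exactly fragsize */
		return nfree * fragsize;
	return nbigger * fragsize + (nfree - nbigger) * (fragsize - 1);
}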
......@@ -1278,7 +1290,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* create a fragment for each channel */
bits = B;
do {
while (nfree > 0 || len > 0) {
list = list->next;
if (list == &ppp->channels) {
i = 0;
......@@ -1289,33 +1301,50 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
if (!pch->avail)
continue;
/*
* Skip this channel if it has a fragment pending already and
* we haven't given a fragment to all of the free channels.
*/
if (pch->avail == 1) {
if (nfree > 0)
continue;
} else {
--nfree;
pch->avail = 1;
}
/* check the channel's mtu and whether it is still attached. */
spin_lock_bh(&pch->downl);
if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) {
/* can't use this channel */
if (pch->chan == NULL) {
/* can't use this channel, it's being deregistered */
spin_unlock_bh(&pch->downl);
pch->avail = 0;
if (--nch == 0)
if (--navail == 0)
break;
continue;
}
/*
* We have to create multiple fragments for this channel
* if fragsize is greater than the channel's mtu.
* Create a fragment for this channel of
* min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
* If mtu+2-hdrlen < 4, that is a ridiculously small
* MTU, so we use mtu = 2 + hdrlen.
*/
if (fragsize > len)
fragsize = len;
for (flen = fragsize; flen > 0; flen -= fnb) {
fnb = flen;
if (fnb > mtu + 2 - hdrlen)
fnb = mtu + 2 - hdrlen;
if (fnb >= len)
flen = fragsize;
mtu = pch->chan->mtu + 2 - hdrlen;
if (mtu < 4)
mtu = 4;
if (flen > mtu)
flen = mtu;
if (flen == len && nfree == 0)
bits |= E;
frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC);
frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
if (frag == 0)
goto noskb;
q = skb_put(frag, fnb + hdrlen);
q = skb_put(frag, flen + hdrlen);
/* make the MP header */
q[0] = PPP_MP >> 8;
q[1] = PPP_MP;
......@@ -1329,21 +1358,35 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
q[5] = ppp->nxseq;
}
/* copy the data in */
memcpy(q + hdrlen, p, fnb);
/*
* Copy the data in.
* Unfortunately there is a bug in older versions of
* the Linux PPP multilink reconstruction code where it
* drops 0-length fragments. Therefore we make sure the
* fragment has at least one byte of data. Any bytes
* we add in this situation will end up as padding on the
* end of the reconstructed packet.
*/
if (flen == 0)
*skb_put(frag, 1) = 0;
else
memcpy(q + hdrlen, p, flen);
/* try to send it down the channel */
chan = pch->chan;
if (!chan->ops->start_xmit(chan, frag))
if (skb_queue_len(&pch->file.xq)
|| !chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
pch->had_frag = 1;
p += fnb;
len -= fnb;
p += flen;
len -= flen;
++ppp->nxseq;
bits = 0;
}
spin_unlock_bh(&pch->downl);
} while (len > 0);
if (--nbigger == 0 && fragsize > 0)
--fragsize;
}
ppp->nxchan = i;
return 1;
......@@ -1691,7 +1734,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
struct list_head *l;
int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
goto err; /* no good, throw it away */
/* Decode sequence number and begin/end bits */
......
......@@ -4212,7 +4212,7 @@ SK_BOOL DualNet;
Flags);
SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST);
pAC->dev[Param.Para32[0]]->flags &= ~IFF_RUNNING;
netif_carrier_off(pAC->dev[Param.Para32[0]]);
spin_unlock_irqrestore(
&pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
Flags);
......@@ -4355,7 +4355,7 @@ SK_BOOL DualNet;
}
/* Inform the world that link protocol is up. */
pAC->dev[Param.Para32[0]]->flags |= IFF_RUNNING;
netif_carrier_on(pAC->dev[Param.Para32[0]]);
break;
case SK_DRV_NET_DOWN: /* SK_U32 Reason */
......@@ -4368,7 +4368,7 @@ SK_BOOL DualNet;
} else {
DoPrintInterfaceChange = SK_TRUE;
}
pAC->dev[Param.Para32[1]]->flags &= ~IFF_RUNNING;
netif_carrier_off(pAC->dev[Param.Para32[1]]);
break;
case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
......@@ -4961,7 +4961,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = &SkGePollController;
#endif
dev->flags &= ~IFF_RUNNING;
SET_NETDEV_DEV(dev, &pdev->dev);
SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
......@@ -5035,7 +5034,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
dev->set_mac_address = &SkGeSetMacAddr;
dev->do_ioctl = &SkGeIoctl;
dev->change_mtu = &SkGeChangeMtu;
dev->flags &= ~IFF_RUNNING;
SET_NETDEV_DEV(dev, &pdev->dev);
SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
......
/*-
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Module : sk_g16.h
* Version : $Revision$
*
* Author : M.Hipp (mhipp@student.uni-tuebingen.de)
* changes by : Patrick J.D. Weichmann
*
* Date Created : 94/05/25
*
* Description : In here are all necessary definitions of
* the am7990 (LANCE) chip used for writing a
* network device driver which uses this chip
*
* $Log$
-*/
#ifndef SK_G16_H
#define SK_G16_H
/*
* Control and Status Register 0 (CSR0) bit definitions
*
* (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write)
*
*/
#define CSR0_ERR 0x8000 /* Error summary (R) */
#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
#define CSR0_CERR 0x2000 /* Collision Error (RC) */
#define CSR0_MISS 0x1000 /* Missed packet (RC) */
#define CSR0_MERR 0x0800 /* Memory Error (RC) */
#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
#define CSR0_RXON 0x0020 /* Receiver on (R) */
#define CSR0_TXON 0x0010 /* Transmitter on (R) */
#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
#define CSR0_STOP 0x0004 /* Stop (RS) */
#define CSR0_STRT 0x0002 /* Start (RS) */
#define CSR0_INIT 0x0001 /* Initialize (RS) */
#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
/*
* Control and Status Register 3 (CSR3) bit definitions
*
*/
#define CSR3_BSWAP 0x0004 /* Byte Swap (RW) */
#define CSR3_ACON 0x0002 /* ALE Control (RW) */
#define CSR3_BCON 0x0001 /* Byte Control (RW) */
/*
* Initialization Block Mode operation Bit Definitions.
*/
#define MODE_PROM 0x8000 /* Promiscuous Mode */
#define MODE_INTL 0x0040 /* Internal Loopback */
#define MODE_DRTY 0x0020 /* Disable Retry */
#define MODE_COLL 0x0010 /* Force Collision */
#define MODE_DTCR 0x0008 /* Disable Transmit CRC) */
#define MODE_LOOP 0x0004 /* Loopback */
#define MODE_DTX 0x0002 /* Disable the Transmitter */
#define MODE_DRX 0x0001 /* Disable the Receiver */
#define MODE_NORMAL 0x0000 /* Normal operation mode */
/*
* Receive message descriptor status bit definitions.
*/
#define RX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
#define RX_ERR 0x40 /* Error Summary */
#define RX_FRAM 0x20 /* Framing Error */
#define RX_OFLO 0x10 /* Overflow Error */
#define RX_CRC 0x08 /* CRC Error */
#define RX_BUFF 0x04 /* Buffer Error */
#define RX_STP 0x02 /* Start of Packet */
#define RX_ENP 0x01 /* End of Packet */
/*
* Transmit message descriptor status bit definitions.
*/
#define TX_OWN 0x80 /* Owner bit 0 = host, 1 = lance */
#define TX_ERR 0x40 /* Error Summary */
#define TX_MORE 0x10 /* More the 1 retry needed to Xmit */
#define TX_ONE 0x08 /* One retry needed to Xmit */
#define TX_DEF 0x04 /* Deferred */
#define TX_STP 0x02 /* Start of Packet */
#define TX_ENP 0x01 /* End of Packet */
/*
* Transmit status (2) (valid if TX_ERR == 1)
*/
#define TX_BUFF 0x8000 /* Buffering error (no ENP) */
#define TX_UFLO 0x4000 /* Underflow (late memory) */
#define TX_LCOL 0x1000 /* Late collision */
#define TX_LCAR 0x0400 /* Loss of Carrier */
#define TX_RTRY 0x0200 /* Failed after 16 retransmissions */
#define TX_TDR 0x003f /* Time-domain-reflectometer-value */
/*
* Structures used for Communication with the LANCE
*/
/* LANCE Initialize Block */
struct init_block
{
unsigned short mode; /* Mode Register */
unsigned char paddr[6]; /* Physical Address (MAC) */
unsigned char laddr[8]; /* Logical Filter Address (not used) */
unsigned int rdrp; /* Receive Descriptor Ring pointer */
unsigned int tdrp; /* Transmit Descriptor Ring pointer */
};
/* Receive Message Descriptor Entry */
struct rmd
{
union
{
unsigned long buffer; /* Address of buffer */
struct
{
unsigned char unused[3];
unsigned volatile char status; /* Status Bits */
} s;
} u;
volatile short blen; /* Buffer Length (two's complement) */
unsigned short mlen; /* Message Byte Count */
};
/* Transmit Message Descriptor Entry */
struct tmd
{
union
{
unsigned long buffer; /* Address of buffer */
struct
{
unsigned char unused[3];
unsigned volatile char status; /* Status Bits */
} s;
} u;
unsigned short blen; /* Buffer Length (two's complement) */
unsigned volatile short status2; /* Error Status Bits */
};
#endif /* End of SK_G16_H */
......@@ -129,7 +129,7 @@ MODULE_PARM_DESC(nowait, "set to 1 for no wait state");
/*
* Transmit timeout, default 5 seconds.
*/
static int watchdog = 5000;
static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
......@@ -660,15 +660,14 @@ static void smc_hardware_send_pkt(unsigned long data)
SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG);
/*
* If THROTTLE_TX_PKTS is set, we look at the TX_EMPTY flag
* before queueing this packet for TX, and if it's clear then
* we stop the queue here. This will have the effect of
* having at most 2 packets queued for TX in the chip's memory
* at all time. If THROTTLE_TX_PKTS is not set then the queue
* is stopped only when memory allocation (MC_ALLOC) does not
* succeed right away.
*/
if (THROTTLE_TX_PKTS && !(SMC_GET_INT() & IM_TX_EMPTY_INT))
* If THROTTLE_TX_PKTS is set, we stop the queue here. This will
* have the effect of having at most one packet queued for TX
* in the chip's memory at all time.
*
* If THROTTLE_TX_PKTS is not set then the queue is stopped only
* when memory allocation (MC_ALLOC) does not succeed right away.
*/
if (THROTTLE_TX_PKTS)
netif_stop_queue(dev);
/* queue the packet for TX */
......@@ -792,17 +791,20 @@ static void smc_tx(struct net_device *dev)
DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
dev->name, tx_status, packet_no);
if (!(tx_status & TS_SUCCESS))
if (!(tx_status & ES_TX_SUC))
lp->stats.tx_errors++;
if (tx_status & TS_LOSTCAR)
if (tx_status & ES_LOSTCARR)
lp->stats.tx_carrier_errors++;
if (tx_status & TS_LATCOL) {
PRINTK("%s: late collision occurred on last xmit\n", dev->name);
if (tx_status & (ES_LATCOL | ES_16COL)) {
PRINTK("%s: %s occurred on last xmit\n", dev->name,
(tx_status & ES_LATCOL) ?
"late collision" : "too many collisions");
lp->stats.tx_window_errors++;
if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) {
printk(KERN_INFO "%s: unexpectedly large numbers of "
"late collisions. Please check duplex "
printk(KERN_INFO "%s: unexpectedly large number of "
"bad collisions. Please check duplex "
"setting.\n", dev->name);
}
}
......@@ -1236,7 +1238,7 @@ static void smc_10bt_check_media(struct net_device *dev, int init)
old_carrier = netif_carrier_ok(dev) ? 1 : 0;
SMC_SELECT_BANK(0);
new_carrier = SMC_inw(ioaddr, EPH_STATUS_REG) & ES_LINK_OK ? 1 : 0;
new_carrier = (SMC_GET_EPH_STATUS() & ES_LINK_OK) ? 1 : 0;
SMC_SELECT_BANK(2);
if (init || (old_carrier != new_carrier)) {
......@@ -1308,15 +1310,16 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (!status)
break;
if (status & IM_RCV_INT) {
DBG(3, "%s: RX irq\n", dev->name);
smc_rcv(dev);
} else if (status & IM_TX_INT) {
if (status & IM_TX_INT) {
/* do this before RX as it will free memory quickly */
DBG(3, "%s: TX int\n", dev->name);
smc_tx(dev);
SMC_ACK_INT(IM_TX_INT);
if (THROTTLE_TX_PKTS)
netif_wake_queue(dev);
} else if (status & IM_RCV_INT) {
DBG(3, "%s: RX irq\n", dev->name);
smc_rcv(dev);
} else if (status & IM_ALLOC_INT) {
DBG(3, "%s: Allocation irq\n", dev->name);
tasklet_hi_schedule(&lp->tx_task);
......@@ -1337,7 +1340,10 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* multiple collisions */
lp->stats.collisions += card_stats & 0xF;
} else if (status & IM_RX_OVRN_INT) {
DBG(1, "%s: RX overrun\n", dev->name);
DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
({ int eph_st; SMC_SELECT_BANK(0);
eph_st = SMC_GET_EPH_STATUS();
SMC_SELECT_BANK(2); eph_st; }) );
SMC_ACK_INT(IM_RX_OVRN_INT);
lp->stats.rx_errors++;
lp->stats.rx_fifo_errors++;
......@@ -1389,7 +1395,7 @@ static void smc_timeout(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int status, mask, meminfo, fifo;
int status, mask, eph_st, meminfo, fifo;
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
......@@ -1398,11 +1404,13 @@ static void smc_timeout(struct net_device *dev)
mask = SMC_GET_INT_MASK();
fifo = SMC_GET_FIFO();
SMC_SELECT_BANK(0);
eph_st = SMC_GET_EPH_STATUS();
meminfo = SMC_GET_MIR();
SMC_SELECT_BANK(2);
spin_unlock_irq(&lp->lock);
PRINTK( "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
dev->name, status, mask, meminfo, fifo );
PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
"MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
dev->name, status, mask, meminfo, fifo, eph_st );
smc_reset(dev);
smc_enable(dev);
......@@ -1863,7 +1871,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
SMC_SELECT_BANK(1);
val = SMC_GET_BASE();
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
if (((unsigned long)ioaddr & ((PAGE_SIZE-1)<<SMC_IO_SHIFT)) != val) { /*XXX: WTF? */
if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
printk("%s: IOADDR %p doesn't match configuration (%x).\n",
CARDNAME, ioaddr, val);
}
......
......@@ -151,7 +151,7 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void
SMC_outw(u16 val, unsigned long ioaddr, int reg)
SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
if (reg & 2) {
unsigned int v = val << 16;
......@@ -317,7 +317,7 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
#define SMC_insl(a, r, p, l) \
smc_pxa_dma_insl(a, lp->physaddr, r, dev->dma, p, l)
static inline void
smc_pxa_dma_insl(u_long ioaddr, u_long physaddr, int reg, int dma,
smc_pxa_dma_insl(void __iomem *ioaddr, u_long physaddr, int reg, int dma,
u_char *buf, int len)
{
dma_addr_t dmabuf;
......@@ -355,7 +355,7 @@ smc_pxa_dma_insl(u_long ioaddr, u_long physaddr, int reg, int dma,
#define SMC_insw(a, r, p, l) \
smc_pxa_dma_insw(a, lp->physaddr, r, dev->dma, p, l)
static inline void
smc_pxa_dma_insw(u_long ioaddr, u_long physaddr, int reg, int dma,
smc_pxa_dma_insw(void __iomem *ioaddr, u_long physaddr, int reg, int dma,
u_char *buf, int len)
{
dma_addr_t dmabuf;
......@@ -680,14 +680,6 @@ static const char * chip_ids[ 16 ] = {
NULL, NULL, NULL};
/*
. Transmit status bits
*/
#define TS_SUCCESS 0x0001
#define TS_LOSTCAR 0x0400
#define TS_LATCOL 0x0200
#define TS_16COL 0x0010
/*
. Receive status bits
*/
......@@ -845,6 +837,7 @@ static const char * chip_ids[ 16 ] = {
#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG )
#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG )
#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG )
#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG )
#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG )
#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG )
#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
......
/*
* Copyright 2003 Adaptec, Inc.
*
* Please read the following license before using the Adaptec Software
* ("Program"). If you do not agree to the license terms, do not use the
* Program:
*
* You agree to be bound by version 2 of the General Public License ("GPL")
* dated June 1991, which can be found at http://www.fsf.org/licenses/gpl.html.
* If the link is broken, write to Free Software Foundation, 59 Temple Place,
* Boston, Massachusetts 02111-1307.
*
* BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE IT IS LICENSED "AS IS" AND
* THERE IS NO WARRANTY FOR THE PROGRAM, INCLUDING BUT NOT LIMITED TO THE
* IMPLIED WARRANTIES OF MERCHANTIBILITY OR FITNESS FOR A PARTICULAR PURPOSE
* (TO THE EXTENT PERMITTED BY APPLICABLE LAW). USE OF THE PROGRAM IS AT YOUR
* OWN RISK. IN NO EVENT WILL ADAPTEC OR ITS LICENSORS BE LIABLE TO YOU FOR
* DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM.
*
*/
static const u32 firmware_rx[] = {
0x010003dc, 0x00000000,
0x04000421, 0x00000086,
0x80000015, 0x0000180e,
0x81000015, 0x00006664,
0x1a0040ab, 0x00000b06,
0x14200011, 0x00000000,
0x14204022, 0x0000aaaa,
0x14204022, 0x00000300,
0x14204022, 0x00000000,
0x1a0040ab, 0x00000b14,
0x14200011, 0x00000000,
0x83000015, 0x00000002,
0x04000021, 0x00000000,
0x00000010, 0x00000000,
0x04000421, 0x00000087,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00008015, 0x00000000,
0x0000003e, 0x00000000,
0x00000010, 0x00000000,
0x82000015, 0x00004000,
0x009e8050, 0x00000000,
0x03008015, 0x00000000,
0x86008015, 0x00000000,
0x82000015, 0x00008000,
0x0100001c, 0x00000000,
0x000050a0, 0x0000010c,
0x4e20d011, 0x00006008,
0x1420d012, 0x00004008,
0x0000f090, 0x00007000,
0x0000c8b0, 0x00003000,
0x00004040, 0x00000000,
0x00108015, 0x00000000,
0x00a2c150, 0x00004000,
0x00a400b0, 0x00000014,
0x00000020, 0x00000000,
0x2500400d, 0x00002525,
0x00047220, 0x00003100,
0x00934070, 0x00000000,
0x00000020, 0x00000000,
0x00924460, 0x00000184,
0x2b20c011, 0x00000000,
0x0000c420, 0x00000540,
0x36014018, 0x0000422d,
0x14200011, 0x00000000,
0x00924460, 0x00000183,
0x3200001f, 0x00000034,
0x02ac0015, 0x00000002,
0x00a60110, 0x00000008,
0x42200011, 0x00000000,
0x00924060, 0x00000103,
0x0000001e, 0x00000000,
0x00000020, 0x00000100,
0x0000001e, 0x00000000,
0x00924460, 0x00000086,
0x00004080, 0x00000000,
0x0092c070, 0x00000000,
0x00924060, 0x00000100,
0x0000c890, 0x00005000,
0x00a6c110, 0x00000000,
0x00b0c090, 0x00000012,
0x021c0015, 0x00000000,
0x3200001f, 0x00000034,
0x00924460, 0x00000510,
0x44210011, 0x00000000,
0x42000011, 0x00000000,
0x83000015, 0x00000040,
0x00924460, 0x00000508,
0x45014018, 0x00004545,
0x00808050, 0x00000000,
0x62208012, 0x00000000,
0x82000015, 0x00000800,
0x15200011, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x80000015, 0x0000eea4,
0x81000015, 0x0000005f,
0x00000060, 0x00000000,
0x00004120, 0x00000000,
0x00004a00, 0x00004000,
0x00924460, 0x00000190,
0x5601401a, 0x00005956,
0x14000011, 0x00000000,
0x00934050, 0x00000018,
0x00930050, 0x00000018,
0x3601403a, 0x0000002d,
0x000643a9, 0x00000000,
0x0000c420, 0x00000140,
0x5601401a, 0x00005956,
0x14000011, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x000642a9, 0x00000000,
0x00024420, 0x00000183,
0x5601401a, 0x00005956,
0x82000015, 0x00002000,
0x15200011, 0x00000000,
0x82000015, 0x00000010,
0x15200011, 0x00000000,
0x82000015, 0x00000010,
0x15200011, 0x00000000,
}; /* 104 Rx instructions */
#define FIRMWARE_RX_SIZE 104
static const u32 firmware_tx[] = {
0x010003dc, 0x00000000,
0x04000421, 0x00000086,
0x80000015, 0x0000180e,
0x81000015, 0x00006664,
0x1a0040ab, 0x00000b06,
0x14200011, 0x00000000,
0x14204022, 0x0000aaaa,
0x14204022, 0x00000300,
0x14204022, 0x00000000,
0x1a0040ab, 0x00000b14,
0x14200011, 0x00000000,
0x83000015, 0x00000002,
0x04000021, 0x00000000,
0x00000010, 0x00000000,
0x04000421, 0x00000087,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00008015, 0x00000000,
0x0000003e, 0x00000000,
0x00000010, 0x00000000,
0x82000015, 0x00004000,
0x009e8050, 0x00000000,
0x03008015, 0x00000000,
0x86008015, 0x00000000,
0x82000015, 0x00008000,
0x0100001c, 0x00000000,
0x000050a0, 0x0000010c,
0x4e20d011, 0x00006008,
0x1420d012, 0x00004008,
0x0000f090, 0x00007000,
0x0000c8b0, 0x00003000,
0x00004040, 0x00000000,
0x00108015, 0x00000000,
0x00a2c150, 0x00004000,
0x00a400b0, 0x00000014,
0x00000020, 0x00000000,
0x2500400d, 0x00002525,
0x00047220, 0x00003100,
0x00934070, 0x00000000,
0x00000020, 0x00000000,
0x00924460, 0x00000184,
0x2b20c011, 0x00000000,
0x0000c420, 0x00000540,
0x36014018, 0x0000422d,
0x14200011, 0x00000000,
0x00924460, 0x00000183,
0x3200001f, 0x00000034,
0x02ac0015, 0x00000002,
0x00a60110, 0x00000008,
0x42200011, 0x00000000,
0x00924060, 0x00000103,
0x0000001e, 0x00000000,
0x00000020, 0x00000100,
0x0000001e, 0x00000000,
0x00924460, 0x00000086,
0x00004080, 0x00000000,
0x0092c070, 0x00000000,
0x00924060, 0x00000100,
0x0000c890, 0x00005000,
0x00a6c110, 0x00000000,
0x00b0c090, 0x00000012,
0x021c0015, 0x00000000,
0x3200001f, 0x00000034,
0x00924460, 0x00000510,
0x44210011, 0x00000000,
0x42000011, 0x00000000,
0x83000015, 0x00000040,
0x00924460, 0x00000508,
0x45014018, 0x00004545,
0x00808050, 0x00000000,
0x62208012, 0x00000000,
0x82000015, 0x00000800,
0x15200011, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x80000015, 0x0000eea4,
0x81000015, 0x0000005f,
0x00000060, 0x00000000,
0x00004120, 0x00000000,
0x00004a00, 0x00004000,
0x00924460, 0x00000190,
0x5601401a, 0x00005956,
0x14000011, 0x00000000,
0x00934050, 0x00000018,
0x00930050, 0x00000018,
0x3601403a, 0x0000002d,
0x000643a9, 0x00000000,
0x0000c420, 0x00000140,
0x5601401a, 0x00005956,
0x14000011, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x000642a9, 0x00000000,
0x00024420, 0x00000183,
0x5601401a, 0x00005956,
0x82000015, 0x00002000,
0x15200011, 0x00000000,
0x82000015, 0x00000010,
0x15200011, 0x00000000,
0x82000015, 0x00000010,
0x15200011, 0x00000000,
}; /* 104 Tx instructions */
#define FIRMWARE_TX_SIZE 104
#if 0
static const u32 firmware_wol[] = {
0x010003dc, 0x00000000,
0x19000421, 0x00000087,
0x80000015, 0x00001a1a,
0x81000015, 0x00001a1a,
0x1a0040ab, 0x00000b06,
0x15200011, 0x00000000,
0x15204022, 0x0000aaaa,
0x15204022, 0x00000300,
0x15204022, 0x00000000,
0x1a0040ab, 0x00000b15,
0x15200011, 0x00000000,
0x83000015, 0x00000002,
0x04000021, 0x00000000,
0x00000010, 0x00000000,
0x04000421, 0x00000087,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00008015, 0x00000000,
0x0000003e, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x82000015, 0x00004000,
0x82000015, 0x00008000,
0x0000000c, 0x00000000,
0x00000010, 0x00000000,
0x00004080, 0x00000100,
0x1f20c011, 0x00001122,
0x2720f011, 0x00003011,
0x19200071, 0x00000000,
0x1a200051, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x1d2040a4, 0x00003344,
0x1d2040a2, 0x00005566,
0x000040a0, 0x00000100,
0x00108050, 0x00000001,
0x1a208012, 0x00000006,
0x82000015, 0x00008080,
0x010003dc, 0x00000000,
0x1d2040a4, 0x00002233,
0x1d2040a4, 0x00004455,
0x2d208011, 0x00000005,
0x1d2040a4, 0x00006611,
0x00108050, 0x00000001,
0x27200011, 0x00000000,
0x1d2050a4, 0x00006600,
0x82000015, 0x00008080,
0x010003dc, 0x00000000,
0x00000050, 0x00000000,
0x1b200031, 0x00000000,
0x0000001e, 0x00000000,
0x0000001e, 0x00000000,
0x0000001e, 0x00000000,
0x0000001e, 0x00000000,
0x00924460, 0x00000086,
0x00004080, 0x00000000,
0x0092c070, 0x00000000,
0x00924060, 0x00000100,
0x0000c890, 0x00005000,
0x00a6c110, 0x00000000,
0x00b0c090, 0x00000012,
0x021c0015, 0x00000000,
0x3200001f, 0x00000034,
0x00924460, 0x00000510,
0x44210011, 0x00000000,
0x42000011, 0x00000000,
0x83000015, 0x00000040,
0x00924460, 0x00000508,
0x476a0012, 0x00000100,
0x83000015, 0x00000008,
0x16200011, 0x00000000,
0x001e8050, 0x00000000,
0x001e8050, 0x00000000,
0x00808050, 0x00000000,
0x03008015, 0x00000000,
0x62208012, 0x00000000,
0x82000015, 0x00000800,
0x16200011, 0x00000000,
0x80000015, 0x0000eea4,
0x81000015, 0x0000005f,
0x00000020, 0x00000000,
0x00004120, 0x00000000,
0x00004a00, 0x00004000,
0x00924460, 0x00000190,
0x5c01401a, 0x0000595c,
0x15000011, 0x00000000,
0x00934050, 0x00000018,
0x00930050, 0x00000018,
0x3601403a, 0x0000002d,
0x00064029, 0x00000000,
0x0000c420, 0x00000140,
0x5c01401a, 0x0000595c,
0x15000011, 0x00000000,
0x00000010, 0x00000000,
0x00000010, 0x00000000,
0x00064029, 0x00000000,
0x00024420, 0x00000183,
0x5c01401a, 0x0000595c,
0x82000015, 0x00002000,
0x16200011, 0x00000000,
0x82000015, 0x00000010,
0x16200011, 0x00000000,
0x82000015, 0x00000010,
0x16200011, 0x00000000,
}; /* 104 WoL instructions */
#define FIRMWARE_WOL_SIZE 104
#endif
......@@ -2819,7 +2819,7 @@ void TLan_PhyMonitor( struct net_device *dev )
if (priv->link) {
priv->link = 0;
printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
dev->flags &= ~IFF_RUNNING;
netif_carrier_off(dev);
TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
return;
}
......@@ -2829,7 +2829,7 @@ void TLan_PhyMonitor( struct net_device *dev )
if ((phy_status & MII_GS_LINK) && !priv->link) {
priv->link = 1;
printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
dev->flags |= IFF_RUNNING;
netif_carrier_on(dev);
}
/* Setup a new monitor */
......
......@@ -119,7 +119,6 @@ extern struct net_device *alloc_orinocodev(int sizeof_card,
extern void free_orinocodev(struct net_device *dev);
extern int __orinoco_up(struct net_device *dev);
extern int __orinoco_down(struct net_device *dev);
extern int orinoco_stop(struct net_device *dev);
extern int orinoco_reinit_firmware(struct net_device *dev);
extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *regs);
......