Commit 4dfd2c88 authored by Jeff Garzik

Merge pobox.com:/garz/repo/netdev-2.6/r8169

into pobox.com:/garz/repo/net-drivers-2.6
parents f60fd965 966f4e9e
......@@ -1979,6 +1979,15 @@ config R8169_NAPI
If in doubt, say N.
config R8169_VLAN
bool "VLAN support"
depends on R8169 && VLAN_8021Q
---help---
Say Y here for the r8169 driver to support the functions required
by the kernel 802.1Q code.
If in doubt, say Y.
config SK98LIN
tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
depends on PCI
......
......@@ -6,6 +6,7 @@
History:
Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>.
May 20 2002 - Add link status force-mode and TBI mode support.
2004 - Massive updates. See kernel SCM system for details.
=========================================================================
1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
Command: 'insmod r8169 media = SET_MEDIA'
......@@ -33,22 +34,34 @@ VERSION 1.2 <2002/11/30>
- Copy mc_filter setup code from 8139cp
(includes an optimization, and avoids set_bit use)
VERSION 1.6LK <2004/04/14>
- Merge of Realtek's version 1.6
- Conversion to DMA API
- Suspend/resume
- Endianness
- Misc Rx/Tx bugs
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#define RTL8169_VERSION "1.2"
#define RTL8169_VERSION "1.6LK"
#define MODULENAME "r8169"
#define RTL8169_DRIVER_NAME MODULENAME " Gigabit Ethernet driver " RTL8169_VERSION
#define PFX MODULENAME ": "
......@@ -65,17 +78,23 @@ VERSION 1.2 <2002/11/30>
#define dprintk(fmt, args...) do {} while (0)
#endif /* RTL8169_DEBUG */
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
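For context, TX_BUFFS_AVAIL keeps one descriptor permanently unused so cur_tx can never wrap onto dirty_tx. A minimal stand-alone sketch of the same arithmetic (tx_slots_free is a hypothetical helper, not part of this patch; NUM_TX_DESC is 64 as defined below):
/* Hypothetical illustration of the TX_BUFFS_AVAIL accounting. */
static unsigned int tx_slots_free(unsigned int dirty_tx, unsigned int cur_tx)
{
	return dirty_tx + 64 /* NUM_TX_DESC */ - cur_tx - 1;
}
/* Empty ring: dirty_tx == cur_tx      -> 63 slots usable.
 * Full ring:  cur_tx == dirty_tx + 63 ->  0 slots usable. */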
#ifdef CONFIG_R8169_NAPI
#define rtl8169_rx_skb netif_receive_skb
#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
#define rtl8169_rx_quota(count, quota) min(count, quota)
#else
#define rtl8169_rx_skb netif_rx
#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
#define rtl8169_rx_quota(count, quota) count
#endif
/* media options */
#define MAX_UNITS 8
static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int num_media = 0;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
......@@ -87,9 +106,6 @@ static int multicast_filter_limit = 32;
/* MAC address length*/
#define MAC_ADDR_LEN 6
/* max supported gigabit ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define MAX_ETH_FRAME_SIZE 1536
#define TX_FIFO_THRESH 256 /* In bytes */
#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
......@@ -99,6 +115,7 @@ static int multicast_filter_limit = 32;
#define RxPacketMaxSize 0x0800 /* Maximum size supported is 16K-1 */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
......@@ -106,7 +123,6 @@ static int multicast_filter_limit = 32;
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
#define RTL_MIN_IO_SIZE 0x80
#define RTL8169_TX_TIMEOUT (6*HZ)
#define RTL8169_PHY_TIMEOUT (10*HZ)
......@@ -122,7 +138,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_B = 0x00,
/* RTL_GIGA_MAC_VER_C = 0x03, */
RTL_GIGA_MAC_VER_D = 0x01,
RTL_GIGA_MAC_VER_E = 0x02
RTL_GIGA_MAC_VER_E = 0x02,
RTL_GIGA_MAC_VER_X = 0x04 /* Greater than RTL_GIGA_MAC_VER_E */
};
enum phy_version {
......@@ -305,28 +322,57 @@ enum RTL8169_register_content {
};
enum _DescStatusBit {
OWNbit = 0x80000000,
EORbit = 0x40000000,
FSbit = 0x20000000,
LSbit = 0x10000000,
DescOwn = (1 << 31), /* Descriptor is owned by NIC */
RingEnd = (1 << 30), /* End of descriptor ring */
FirstFrag = (1 << 29), /* First segment of a packet */
LastFrag = (1 << 28), /* Final segment of a packet */
/* Tx private */
LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
MSSShift = 16, /* MSS value position */
MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
IPCS = (1 << 18), /* Calculate IP checksum */
UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
TxVlanTag = (1 << 17), /* Add VLAN tag */
/* Rx private */
PID1 = (1 << 18), /* Protocol ID bit 1/2 */
PID0 = (1 << 17), /* Protocol ID bit 2/2 */
#define RxProtoUDP (PID1)
#define RxProtoTCP (PID0)
#define RxProtoIP (PID1 | PID0)
#define RxProtoMask RxProtoIP
IPFail = (1 << 16), /* IP checksum failed */
UDPFail = (1 << 15), /* UDP/IP checksum failed */
TCPFail = (1 << 14), /* TCP/IP checksum failed */
RxVlanTag = (1 << 16), /* VLAN tag available */
};
#define RsvdMask 0x3fffc000
struct TxDesc {
u32 status;
u32 vlan_tag;
u32 opts1;
u32 opts2;
u64 addr;
};
struct RxDesc {
u32 status;
u32 vlan_tag;
u32 opts1;
u32 opts2;
u64 addr;
};
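For orientation, the following hypothetical helper (example_fill_tx_desc is not part of this patch) shows how the renamed opts1/opts2/addr fields and the bits above combine for a single-fragment transmit with TCP/IP checksum offload; the real logic lives in rtl8169_start_xmit() further down.
/* Illustration only: fill one Tx descriptor for a single-fragment frame
 * with IP/TCP checksum offload requested. */
static void example_fill_tx_desc(struct TxDesc *txd, dma_addr_t mapping,
				 unsigned int len, int last_in_ring)
{
	u32 opts1 = DescOwn | FirstFrag | LastFrag | IPCS | TCPCS | len;

	if (last_in_ring)
		opts1 |= RingEnd;	/* wrap back to the first descriptor */

	txd->addr = cpu_to_le64(mapping);
	txd->opts2 = 0;			/* no VLAN tag */
	wmb();				/* descriptor body visible before ownership flips */
	txd->opts1 = cpu_to_le32(opts1);
}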
struct ring_info {
struct sk_buff *skb;
u32 len;
u8 __pad[sizeof(void *) - sizeof(u32)];
};
struct rtl8169_private {
void *mmio_addr; /* memory map physical address */
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
struct net_device_stats stats; /* statistics of net device */
spinlock_t lock; /* spin lock flag */
......@@ -342,25 +388,29 @@ struct rtl8169_private {
dma_addr_t TxPhyAddr;
dma_addr_t RxPhyAddr;
struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */
struct sk_buff *Tx_skbuff[NUM_TX_DESC]; /* Tx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
unsigned rx_buf_sz;
struct timer_list timer;
u16 cp_cmd;
u16 intr_mask;
int phy_auto_nego_reg;
int phy_1000_ctrl_reg;
#ifdef CONFIG_R8169_VLAN
struct vlan_group *vlgrp;
#endif
int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
void (*get_settings)(struct net_device *, struct ethtool_cmd *);
void (*phy_reset_enable)(void *);
unsigned int (*phy_reset_pending)(void *);
unsigned int (*link_ok)(void *);
void (*phy_reset_enable)(void __iomem *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct work_struct task;
};
MODULE_AUTHOR("Realtek");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(use_dac, "i");
module_param_array(media, int, &num_media, 0);
module_param(rx_copybreak, int, 0);
module_param(use_dac, int, 0);
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
MODULE_LICENSE("GPL");
......@@ -374,6 +424,8 @@ static int rtl8169_close(struct net_device *dev);
static void rtl8169_set_rx_mode(struct net_device *dev);
static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
void __iomem *);
#ifdef CONFIG_R8169_NAPI
static int rtl8169_poll(struct net_device *dev, int *budget);
#endif
......@@ -390,7 +442,7 @@ static const unsigned int rtl8169_rx_config =
#define PHY_Cap_100_Half_Or_Less PHY_Cap_100_Half | PHY_Cap_10_Full_Or_Less
#define PHY_Cap_100_Full_Or_Less PHY_Cap_100_Full | PHY_Cap_100_Half_Or_Less
static void mdio_write(void *ioaddr, int RegAddr, int value)
static void mdio_write(void __iomem *ioaddr, int RegAddr, int value)
{
int i;
......@@ -405,7 +457,7 @@ static void mdio_write(void *ioaddr, int RegAddr, int value)
}
}
static int mdio_read(void *ioaddr, int RegAddr)
static int mdio_read(void __iomem *ioaddr, int RegAddr)
{
int i, value = -1;
......@@ -423,32 +475,32 @@ static int mdio_read(void *ioaddr, int RegAddr)
return value;
}
static unsigned int rtl8169_tbi_reset_pending(void *ioaddr)
static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr)
{
return RTL_R32(TBICSR) & TBIReset;
}
static unsigned int rtl8169_xmii_reset_pending(void *ioaddr)
static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr)
{
return mdio_read(ioaddr, 0) & 0x8000;
}
static unsigned int rtl8169_tbi_link_ok(void *ioaddr)
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
return RTL_R32(TBICSR) & TBILinkOk;
}
static unsigned int rtl8169_xmii_link_ok(void *ioaddr)
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
return RTL_R8(PHYstatus) & LinkStatus;
}
static void rtl8169_tbi_reset_enable(void *ioaddr)
static void rtl8169_tbi_reset_enable(void __iomem *ioaddr)
{
RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
static void rtl8169_xmii_reset_enable(void *ioaddr)
static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
{
unsigned int val;
......@@ -457,7 +509,7 @@ static void rtl8169_xmii_reset_enable(void *ioaddr)
}
static void rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp, void *ioaddr)
struct rtl8169_private *tp, void __iomem *ioaddr)
{
unsigned long flags;
......@@ -512,11 +564,16 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
strcpy(info->bus_info, pci_name(tp->pci_dev));
}
static int rtl8169_get_regs_len(struct net_device *dev)
{
return R8169_REGS_SIZE;
}
static int rtl8169_set_speed_tbi(struct net_device *dev,
u8 autoneg, u16 speed, u8 duplex)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
int ret = 0;
u32 reg;
......@@ -540,7 +597,7 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
u8 autoneg, u16 speed, u8 duplex)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
int auto_nego, giga_ctrl;
auto_nego = mdio_read(ioaddr, PHY_AUTO_NEGO_REG);
......@@ -602,10 +659,108 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
static u32 rtl8169_get_rx_csum(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
return tp->cp_cmd & RxChkSum;
}
static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (data)
tp->cp_cmd |= RxChkSum;
else
tp->cp_cmd &= ~RxChkSum;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_R16(CPlusCmd);
spin_unlock_irqrestore(&tp->lock, flags);
return 0;
}
#ifdef CONFIG_R8169_VLAN
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
return (tp->vlgrp && vlan_tx_tag_present(skb)) ?
TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)) : 0x00;
}
static void rtl8169_vlan_rx_register(struct net_device *dev,
struct vlan_group *grp)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
tp->vlgrp = grp;
if (tp->vlgrp)
tp->cp_cmd |= RxVlan;
else
tp->cp_cmd &= ~RxVlan;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_R16(CPlusCmd);
spin_unlock_irqrestore(&tp->lock, flags);
}
static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct rtl8169_private *tp = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
if (tp->vlgrp)
tp->vlgrp->vlan_devices[vid] = NULL;
spin_unlock_irqrestore(&tp->lock, flags);
}
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
struct sk_buff *skb)
{
u32 opts2 = desc->opts2;
int ret;
if (tp->vlgrp && (opts2 & RxVlanTag)) {
rtl8169_rx_hwaccel_skb(skb, tp->vlgrp,
be16_to_cpu(opts2 & 0xffff));
ret = 0;
} else
ret = -1;
desc->opts2 = 0;
return ret;
}
#else /* !CONFIG_R8169_VLAN */
static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
struct sk_buff *skb)
{
return 0;
}
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
struct sk_buff *skb)
{
return -1;
}
#endif
static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
u32 status;
cmd->supported =
......@@ -624,7 +779,7 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
u8 status;
cmd->supported = SUPPORTED_10baseT_Half |
......@@ -675,15 +830,38 @@ static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
unsigned long flags;
if (regs->len > R8169_REGS_SIZE)
regs->len = R8169_REGS_SIZE;
spin_lock_irqsave(&tp->lock, flags);
memcpy_fromio(p, tp->mmio_addr, regs->len);
spin_unlock_irqrestore(&tp->lock, flags);
}
static struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
.get_settings = rtl8169_get_settings,
.set_settings = rtl8169_set_settings,
.get_rx_csum = rtl8169_get_rx_csum,
.set_rx_csum = rtl8169_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = rtl8169_get_regs,
};
static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
int bitval)
{
int val;
......@@ -694,12 +872,13 @@ static void rtl8169_write_gmii_reg_bit(void *ioaddr, int reg, int bitnum,
mdio_write(ioaddr, reg, val & 0xffff);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void *ioaddr)
static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
const struct {
u32 mask;
int mac_version;
} mac_info[] = {
{ 0x1 << 28, RTL_GIGA_MAC_VER_X },
{ 0x1 << 26, RTL_GIGA_MAC_VER_E },
{ 0x1 << 23, RTL_GIGA_MAC_VER_D },
{ 0x00000000, RTL_GIGA_MAC_VER_B } /* Catch-all */
......@@ -734,7 +913,7 @@ static void rtl8169_print_mac_version(struct rtl8169_private *tp)
dprintk("mac_version == Unknown\n");
}
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void *ioaddr)
static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr)
{
const struct {
u16 mask;
......@@ -780,7 +959,7 @@ static void rtl8169_print_phy_version(struct rtl8169_private *tp)
static void rtl8169_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
struct {
u16 regs[5]; /* Beware of bit-sign propagation */
} phy_magic[5] = { {
......@@ -850,7 +1029,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
struct net_device *dev = (struct net_device *)__opaque;
struct rtl8169_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_B);
......@@ -911,41 +1090,65 @@ static inline void rtl8169_request_timer(struct net_device *dev)
add_timer(timer);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
disable_irq(pdev->irq);
rtl8169_interrupt(pdev->irq, dev, NULL);
enable_irq(pdev->irq);
}
#endif
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
void __iomem *ioaddr)
{
iounmap(ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
}
static int __devinit
rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
void **ioaddr_out)
void __iomem **ioaddr_out)
{
void *ioaddr = NULL;
void __iomem *ioaddr;
struct net_device *dev;
struct rtl8169_private *tp;
unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
int rc, i, acpi_idle_state = 0, pm_cap;
int rc = -ENOMEM, i, acpi_idle_state = 0, pm_cap;
assert(pdev != NULL);
assert(ioaddr_out != NULL);
*ioaddr_out = NULL;
*dev_out = NULL;
// dev zeroed in alloc_etherdev
dev = alloc_etherdev(sizeof (*tp));
if (dev == NULL) {
printk(KERN_ERR PFX "unable to alloc new ethernet\n");
return -ENOMEM;
goto err_out;
}
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
tp = dev->priv;
tp = netdev_priv(dev);
// enable device (incl. PCI PM wakeup and hotplug setup)
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR PFX "%s: enable failure\n", pdev->slot_name);
goto err_out;
goto err_out_free_dev;
}
rc = pci_set_mwi(pdev);
if (rc < 0)
goto err_out_disable;
/* save power state before pci_enable_device overwrites it */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap) {
......@@ -956,41 +1159,37 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
} else {
printk(KERN_ERR PFX
"Cannot find PowerManagement capability, aborting.\n");
goto err_out_free_res;
goto err_out_mwi;
}
mmio_start = pci_resource_start(pdev, 1);
mmio_end = pci_resource_end(pdev, 1);
mmio_flags = pci_resource_flags(pdev, 1);
mmio_len = pci_resource_len(pdev, 1);
// make sure PCI base addr 1 is MMIO
if (!(mmio_flags & IORESOURCE_MEM)) {
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX
"region #1 not an MMIO resource, aborting\n");
rc = -ENODEV;
goto err_out_disable;
goto err_out_mwi;
}
// check for weird/broken PCI region reporting
if (mmio_len < RTL_MIN_IO_SIZE) {
if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) {
printk(KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out_disable;
goto err_out_mwi;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc) {
printk(KERN_ERR PFX "%s: could not request regions.\n",
pdev->slot_name);
goto err_out_disable;
goto err_out_mwi;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac)
!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
tp->cp_cmd |= PCIDAC;
else {
dev->features |= NETIF_F_HIGHDMA;
} else {
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (rc < 0) {
printk(KERN_ERR PFX "DMA configuration failed.\n");
......@@ -998,12 +1197,10 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
}
// enable PCI bus-mastering
pci_set_master(pdev);
// ioremap MMIO region
ioaddr = ioremap(mmio_start, mmio_len);
ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE);
if (ioaddr == NULL) {
printk(KERN_ERR PFX "cannot remap MMIO, aborting\n");
rc = -EIO;
......@@ -1040,27 +1237,36 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
tp->chipset = i;
tp->rx_buf_sz = RX_BUF_SIZE;
*ioaddr_out = ioaddr;
*dev_out = dev;
return 0;
out:
return rc;
err_out_free_res:
pci_release_regions(pdev);
err_out_mwi:
pci_clear_mwi(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out:
err_out_free_dev:
free_netdev(dev);
return rc;
err_out:
*ioaddr_out = NULL;
*dev_out = NULL;
goto out;
}
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct rtl8169_private *tp = NULL;
void *ioaddr = NULL;
struct rtl8169_private *tp;
void __iomem *ioaddr = NULL;
static int board_idx = -1;
static int printed_version = 0;
u8 autoneg, duplex;
......@@ -1081,10 +1287,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
tp = dev->priv;
tp = netdev_priv(dev);
assert(ioaddr != NULL);
assert(dev != NULL);
assert(tp != NULL);
if (RTL_R8(PHYstatus) & TBI_Enable) {
tp->set_speed = rtl8169_set_speed_tbi;
......@@ -1109,18 +1313,30 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->open = rtl8169_open;
dev->hard_start_xmit = rtl8169_start_xmit;
dev->get_stats = rtl8169_get_stats;
dev->ethtool_ops = &rtl8169_ethtool_ops;
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->stop = rtl8169_close;
dev->tx_timeout = rtl8169_tx_timeout;
dev->set_multicast_list = rtl8169_set_rx_mode;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
#ifdef CONFIG_R8169_NAPI
dev->poll = rtl8169_poll;
dev->weight = R8169_NAPI_WEIGHT;
printk(KERN_INFO PFX "NAPI enabled\n");
#endif
#ifdef CONFIG_R8169_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = rtl8169_vlan_rx_register;
dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = rtl8169_netpoll;
#endif
tp->intr_mask = 0xffff;
tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
......@@ -1129,10 +1345,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = register_netdev(dev);
if (rc) {
iounmap(ioaddr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
rtl8169_release_board(pdev, dev, ioaddr);
return rc;
}
......@@ -1188,11 +1401,7 @@ rtl8169_remove_one(struct pci_dev *pdev)
assert(tp != NULL);
unregister_netdev(dev);
iounmap(tp->mmio_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
rtl8169_release_board(pdev, dev, tp->mmio_addr);
pci_set_drvdata(pdev, NULL);
}
......@@ -1202,7 +1411,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
if (!netif_running(dev))
......@@ -1271,6 +1480,8 @@ rtl8169_open(struct net_device *dev)
if (retval < 0)
goto err_free_rx;
INIT_WORK(&tp->task, NULL, dev);
rtl8169_hw_start(dev);
rtl8169_request_timer(dev);
......@@ -1290,11 +1501,23 @@ rtl8169_open(struct net_device *dev)
goto out;
}
static void rtl8169_hw_reset(void __iomem *ioaddr)
{
/* Disable interrupts */
RTL_W16(IntrMask, 0x0000);
/* Reset the chipset */
RTL_W8(ChipCmd, CmdReset);
/* PCI commit */
RTL_R8(ChipCmd);
}
static void
rtl8169_hw_start(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
u32 i;
/* Soft reset the chip. */
......@@ -1333,8 +1556,6 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W16(CPlusCmd, tp->cp_cmd);
}
tp->cur_rx = 0;
RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
......@@ -1358,49 +1579,51 @@ rtl8169_hw_start(struct net_device *dev)
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
desc->addr = 0x0badbadbadbadbadull;
desc->status &= ~cpu_to_le32(OWNbit | RsvdMask);
desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
static void rtl8169_free_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
struct RxDesc *desc)
static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
struct sk_buff **sk_buff, struct RxDesc *desc)
{
pci_unmap_single(pdev, le64_to_cpu(desc->addr), RX_BUF_SIZE,
struct pci_dev *pdev = tp->pci_dev;
pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(*sk_buff);
*sk_buff = NULL;
rtl8169_make_unusable_by_asic(desc);
}
static inline void rtl8169_return_to_asic(struct RxDesc *desc)
static inline void rtl8169_return_to_asic(struct RxDesc *desc, int rx_buf_sz)
{
desc->status |= cpu_to_le32(OWNbit + RX_BUF_SIZE);
desc->opts1 |= cpu_to_le32(DescOwn + rx_buf_sz);
}
static inline void rtl8169_give_to_asic(struct RxDesc *desc, dma_addr_t mapping)
static inline void rtl8169_give_to_asic(struct RxDesc *desc, dma_addr_t mapping,
int rx_buf_sz)
{
desc->addr = cpu_to_le64(mapping);
desc->status |= cpu_to_le32(OWNbit + RX_BUF_SIZE);
desc->opts1 |= cpu_to_le32(DescOwn + rx_buf_sz);
}
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev,
struct sk_buff **sk_buff, struct RxDesc *desc)
static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
struct RxDesc *desc, int rx_buf_sz)
{
struct sk_buff *skb;
dma_addr_t mapping;
int ret = 0;
skb = dev_alloc_skb(RX_BUF_SIZE);
skb = dev_alloc_skb(rx_buf_sz);
if (!skb)
goto err_out;
skb->dev = dev;
skb_reserve(skb, 2);
*sk_buff = skb;
mapping = pci_map_single(pdev, skb->tail, RX_BUF_SIZE,
mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
PCI_DMA_FROMDEVICE);
rtl8169_give_to_asic(desc, mapping);
rtl8169_give_to_asic(desc, mapping, rx_buf_sz);
out:
return ret;
......@@ -1417,7 +1640,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
for (i = 0; i < NUM_RX_DESC; i++) {
if (tp->Rx_skbuff[i]) {
rtl8169_free_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
tp->RxDescArray + i);
}
}
......@@ -1434,8 +1657,8 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
if (tp->Rx_skbuff[i])
continue;
ret = rtl8169_alloc_rx_skb(tp->pci_dev, dev, tp->Rx_skbuff + i,
tp->RxDescArray + i);
ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
tp->RxDescArray + i, tp->rx_buf_sz);
if (ret < 0)
break;
}
......@@ -1444,19 +1667,21 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
desc->status |= cpu_to_le32(EORbit);
desc->opts1 |= cpu_to_le32(RingEnd);
}
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
static int rtl8169_init_ring(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
tp->cur_rx = tp->dirty_rx = 0;
tp->cur_tx = tp->dirty_tx = 0;
memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof (struct TxDesc));
memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof (struct RxDesc));
rtl8169_init_ring_indexes(tp);
memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
......@@ -1471,123 +1696,293 @@ static int rtl8169_init_ring(struct net_device *dev)
return -ENOMEM;
}
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
struct TxDesc *desc)
{
u32 len = sk_buff[0]->len;
unsigned int len = tx_skb->len;
pci_unmap_single(pdev, le64_to_cpu(desc->addr),
len < ETH_ZLEN ? ETH_ZLEN : len, PCI_DMA_TODEVICE);
pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
desc->opts1 = 0x00;
desc->opts2 = 0x00;
desc->addr = 0x00;
*sk_buff = NULL;
tx_skb->len = 0;
}
static void
rtl8169_tx_clear(struct rtl8169_private *tp)
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
int i;
unsigned int i;
tp->cur_tx = 0;
for (i = 0; i < NUM_TX_DESC; i++) {
struct sk_buff *skb = tp->Tx_skbuff[i];
for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
unsigned int entry = i % NUM_TX_DESC;
struct ring_info *tx_skb = tp->tx_skb + entry;
unsigned int len = tx_skb->len;
if (skb) {
rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + i,
tp->TxDescArray + i);
dev_kfree_skb(skb);
if (len) {
struct sk_buff *skb = tx_skb->skb;
rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
tp->TxDescArray + entry);
if (skb) {
dev_kfree_skb(skb);
tx_skb->skb = NULL;
}
tp->stats.tx_dropped++;
}
}
tp->cur_tx = tp->dirty_tx = 0;
}
static void
rtl8169_tx_timeout(struct net_device *dev)
static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u8 tmp8;
printk(KERN_INFO "%s: TX Timeout\n", dev->name);
/* disable Tx, if not already */
tmp8 = RTL_R8(ChipCmd);
if (tmp8 & CmdTxEnb)
RTL_W8(ChipCmd, tmp8 & ~CmdTxEnb);
PREPARE_WORK(&tp->task, task, dev);
schedule_delayed_work(&tp->task, 4);
}
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16(IntrMask, 0x0000);
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
synchronize_irq(dev->irq);
/* Stop a shared interrupt from scavenging while we are. */
spin_lock_irq(&tp->lock);
rtl8169_tx_clear(tp);
spin_unlock_irq(&tp->lock);
/* Wait for any pending NAPI task to complete */
netif_poll_disable(dev);
}
/* ...and finally, reset everything */
rtl8169_hw_start(dev);
static void rtl8169_reinit_task(void *_data)
{
struct net_device *dev = _data;
int ret;
netif_wake_queue(dev);
if (netif_running(dev)) {
rtl8169_wait_for_quiescence(dev);
rtl8169_close(dev);
}
ret = rtl8169_open(dev);
if (unlikely(ret < 0)) {
if (net_ratelimit()) {
printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
" Rescheduling.\n", dev->name, ret);
}
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
}
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void rtl8169_reset_task(void *_data)
{
struct net_device *dev = _data;
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
u32 len = skb->len;
if (unlikely(skb->len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
goto err_update_stats;
len = ETH_ZLEN;
if (!netif_running(dev))
return;
rtl8169_wait_for_quiescence(dev);
rtl8169_rx_interrupt(dev, tp, tp->mmio_addr);
rtl8169_tx_clear(tp);
if (tp->dirty_rx == tp->cur_rx) {
rtl8169_init_ring_indexes(tp);
rtl8169_hw_start(dev);
netif_wake_queue(dev);
} else {
if (net_ratelimit()) {
printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
dev->name);
}
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
}
static void rtl8169_tx_timeout(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_hw_reset(tp->mmio_addr);
/* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
u32 opts1)
{
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag, entry;
struct TxDesc *txd;
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
u32 status;
u32 status, len;
void *addr;
mapping = pci_map_single(tp->pci_dev, skb->data, len,
PCI_DMA_TODEVICE);
entry = (entry + 1) % NUM_TX_DESC;
tp->Tx_skbuff[entry] = skb;
tp->TxDescArray[entry].addr = cpu_to_le64(mapping);
txd = tp->TxDescArray + entry;
len = frag->size;
addr = ((void *) page_address(frag->page)) + frag->page_offset;
mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
/* anti gcc 2.95.3 bugware */
status = OWNbit | FSbit | LSbit | len |
(EORbit * !((entry + 1) % NUM_TX_DESC));
tp->TxDescArray[entry].status = cpu_to_le32(status);
RTL_W8(TxPoll, 0x40); //set polling bit
/* anti gcc 2.95.3 bugware (sic) */
status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
dev->trans_start = jiffies;
txd->opts1 = cpu_to_le32(status);
txd->addr = cpu_to_le64(mapping);
tp->cur_tx++;
smp_wmb();
} else
goto err_drop;
tp->tx_skb[entry].len = len;
}
if (cur_frag) {
tp->tx_skb[entry].skb = skb;
txd->opts1 |= cpu_to_le32(LastFrag);
}
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
u32 dirty = tp->dirty_tx;
return cur_frag;
}
static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
if (dev->features & NETIF_F_TSO) {
u32 mss = skb_shinfo(skb)->tso_size;
if (mss)
return LargeSend | ((mss & MSSMask) << MSSShift);
}
if (skb->ip_summed == CHECKSUM_HW) {
const struct iphdr *ip = skb->nh.iph;
if (ip->protocol == IPPROTO_TCP)
return IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
return IPCS | UDPCS;
WARN_ON(1); /* we need a WARN() */
}
return 0;
}
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
struct TxDesc *txd = tp->TxDescArray + entry;
void __iomem *ioaddr = tp->mmio_addr;
dma_addr_t mapping;
u32 status, len;
u32 opts1;
int ret = 0;
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
goto err_stop;
}
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop;
opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
frags = rtl8169_xmit_frags(tp, skb, opts1);
if (frags) {
len = skb_headlen(skb);
opts1 |= FirstFrag;
} else {
len = skb->len;
if (unlikely(len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
goto err_update_stats;
len = ETH_ZLEN;
}
opts1 |= FirstFrag | LastFrag;
tp->tx_skb[entry].skb = skb;
}
mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
wmb();
/* anti gcc 2.95.3 bugware (sic) */
status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
dev->trans_start = jiffies;
tp->cur_tx += frags + 1;
smp_wmb();
RTL_W8(TxPoll, 0x40); //set polling bit
if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
netif_stop_queue(dev);
smp_rmb();
if (dirty != tp->dirty_tx)
if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
netif_wake_queue(dev);
}
out:
return 0;
return ret;
err_drop:
dev_kfree_skb(skb);
err_stop:
netif_stop_queue(dev);
ret = 1;
err_update_stats:
tp->stats.tx_dropped++;
goto out;
}
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
void __iomem *ioaddr = tp->mmio_addr;
u16 pci_status, pci_cmd;
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
pci_read_config_word(pdev, PCI_STATUS, &pci_status);
printk(KERN_ERR PFX "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
dev->name, pci_cmd, pci_status);
/*
* The recovery sequence below admits a very elaborated explanation:
* - it seems to work;
* - I did not see what else could be done.
*
* Feel free to adjust to your needs.
*/
pci_write_config_word(pdev, PCI_COMMAND,
pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
pci_write_config_word(pdev, PCI_STATUS,
pci_status & (PCI_STATUS_DETECTED_PARITY |
PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", dev->name);
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
rtl8169_hw_reset(ioaddr);
}
static void
rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void *ioaddr)
void __iomem *ioaddr)
{
unsigned int dirty_tx, tx_left;
......@@ -1601,22 +1996,24 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
while (tx_left > 0) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
struct sk_buff *skb = tp->Tx_skbuff[entry];
struct ring_info *tx_skb = tp->tx_skb + entry;
u32 len = tx_skb->len;
u32 status;
rmb();
status = le32_to_cpu(tp->TxDescArray[entry].status);
if (status & OWNbit)
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
/* FIXME: is it really accurate for TxErr ? */
tp->stats.tx_bytes += skb->len >= ETH_ZLEN ?
skb->len : ETH_ZLEN;
tp->stats.tx_bytes += len;
tp->stats.tx_packets++;
rtl8169_unmap_tx_skb(tp->pci_dev, tp->Tx_skbuff + entry,
tp->TxDescArray + entry);
dev_kfree_skb_irq(skb);
tp->Tx_skbuff[entry] = NULL;
rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
if (status & LastFrag) {
dev_kfree_skb_irq(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
tx_left--;
}
......@@ -1624,14 +2021,28 @@ rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
smp_wmb();
if (netif_queue_stopped(dev))
if (netif_queue_stopped(dev) &&
(TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
}
}
static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
{
u32 opts1 = desc->opts1;
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
((status == RxProtoIP) && !(opts1 & IPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
}
static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
struct RxDesc *desc,
struct net_device *dev)
struct RxDesc *desc, int rx_buf_sz)
{
int ret = -1;
......@@ -1640,11 +2051,10 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
skb = dev_alloc_skb(pkt_size + 2);
if (skb) {
skb->dev = dev;
skb_reserve(skb, 2);
eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
*sk_buff = skb;
rtl8169_return_to_asic(desc);
rtl8169_return_to_asic(desc, rx_buf_sz);
ret = 0;
}
}
......@@ -1653,7 +2063,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
static int
rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void *ioaddr)
void __iomem *ioaddr)
{
unsigned int cur_rx, rx_left, count;
int delta;
......@@ -1671,9 +2081,9 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
u32 status;
rmb();
status = le32_to_cpu(tp->RxDescArray[entry].status);
status = le32_to_cpu(tp->RxDescArray[entry].opts1);
if (status & OWNbit)
if (status & DescOwn)
break;
if (status & RxRES) {
printk(KERN_INFO "%s: Rx ERROR!!!\n", dev->name);
......@@ -1689,22 +2099,27 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
void (*pci_action)(struct pci_dev *, dma_addr_t,
size_t, int) = pci_dma_sync_single_for_device;
rtl8169_rx_csum(skb, desc);
pci_dma_sync_single_for_cpu(tp->pci_dev,
le64_to_cpu(desc->addr), RX_BUF_SIZE,
le64_to_cpu(desc->addr), tp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
if (rtl8169_try_rx_copy(&skb, pkt_size, desc, dev)) {
if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
tp->rx_buf_sz)) {
pci_action = pci_unmap_single;
tp->Rx_skbuff[entry] = NULL;
}
pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb->dev = dev;
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
rtl8169_rx_skb(skb);
if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
rtl8169_rx_skb(skb);
dev->last_rx = jiffies;
tp->stats.rx_bytes += pkt_size;
......@@ -1745,10 +2160,13 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
struct net_device *dev = (struct net_device *) dev_instance;
struct rtl8169_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
int status = 0;
int handled = 0;
if (unlikely(!netif_running(dev)))
goto out;
do {
status = RTL_R16(IntrStatus);
......@@ -1766,11 +2184,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
break;
if (unlikely(status & SYSErr)) {
printk(KERN_ERR PFX "%s: PCI error (status: 0x%04x)."
" Device disabled.\n", dev->name, status);
RTL_W8(ChipCmd, 0x00);
RTL_W16(IntrMask, 0x0000);
RTL_R16(IntrMask);
rtl8169_pcierr_interrupt(dev);
break;
}
......@@ -1784,7 +2198,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
if (likely(netif_rx_schedule_prep(dev)))
__netif_rx_schedule(dev);
else {
printk(KERN_INFO "%s: interrupt %x taken in poll\n",
printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
dev->name, status);
}
break;
......@@ -1807,6 +2221,7 @@ rtl8169_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
/* Clear all interrupt sources. */
RTL_W16(IntrStatus, 0xffff);
}
out:
return IRQ_RETVAL(handled);
}
......@@ -1815,7 +2230,7 @@ static int rtl8169_poll(struct net_device *dev, int *budget)
{
unsigned int work_done, work_to_do = min(*budget, dev->quota);
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
work_done = rtl8169_rx_interrupt(dev, tp, ioaddr);
rtl8169_tx_interrupt(dev, tp, ioaddr);
......@@ -1845,10 +2260,12 @@ rtl8169_close(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
netif_stop_queue(dev);
flush_scheduled_work();
rtl8169_delete_timer(dev);
spin_lock_irq(&tp->lock);
......@@ -1865,9 +2282,10 @@ rtl8169_close(struct net_device *dev)
spin_unlock_irq(&tp->lock);
synchronize_irq(dev->irq);
free_irq(dev->irq, dev);
netif_poll_disable(dev);
rtl8169_tx_clear(tp);
rtl8169_rx_clear(tp);
......@@ -1886,7 +2304,7 @@ static void
rtl8169_set_rx_mode(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode;
......@@ -1938,7 +2356,7 @@ rtl8169_set_rx_mode(struct net_device *dev)
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
if (netif_running(dev)) {
......