Commit c994ae17 authored by Linus Torvalds

Merge bk://gkernel.bkbits.net/net-drivers-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 48273525 7667f5e4
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
September 13, 2004
November 17, 2004
Contents
......@@ -18,9 +18,7 @@ In This Release
===============
This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of
Adapters, version 3.2.x. This driver includes support for Itanium(TM)2 and
EM64T systems.
Adapters, version 3.3.x. This driver supports 2.4.x and 2.6.x kernels.
Identifying Your Adapter
========================
......@@ -119,12 +117,6 @@ Additional Configurations
The latest release of ethtool can be found at:
http://sf.net/projects/gkernel.
After ethtool is installed, ethtool-copy.h must be copied and renamed to
ethtool.h in your kernel source tree at <linux_kernel_src>/include/linux.
Backup the original ethtool.h as needed before copying. The driver then
must be recompiled in order to take advantage of the latest ethtool
features.
NOTE: This driver uses mii support from the kernel. As a result, when
there is no link, ethtool will report speed/duplex to be 10/half.
......
Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
===============================================================
September 13, 2004
November 17, 2004
Contents
......@@ -20,8 +20,7 @@ In This Release
===============
This file describes the Linux* Base Driver for the Intel(R) PRO/1000 Family
of Adapters, version 5.x.x. This driver includes support for Itanium(TM)2
and EM64T systems.
of Adapters, version 5.x.x.
For questions related to hardware requirements, refer to the documentation
supplied with your Intel PRO/1000 adapter. All hardware requirements listed
......@@ -145,9 +144,11 @@ Valid Range: 80-256 for 82542 and 82543-based adapters
Default Value: 256
This value is the number of receive descriptors allocated by the driver.
Increasing this value allows the driver to buffer more incoming packets.
Each descriptor is 16 bytes. A receive buffer is also allocated for each
descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
on the MTU setting. The maximum MTU size is 16110.
Each descriptor is 16 bytes. A receive buffer is allocated for each
descriptor and can either be 2048 or 4096 bytes long, depending on the MTU
setting. An incoming packet can span one or more receive descriptors.
The maximum MTU size is 16110.
NOTE: MTU designates the frame size. It only needs to be set for Jumbo
Frames.
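As a rough, back-of-the-envelope illustration of the sizing described above (a sketch using only the numbers quoted in this section, not driver code):
    #include <stdio.h>

    /* Sketch: memory used by the default receive ring described above --
     * 256 descriptors of 16 bytes each, plus one receive buffer per
     * descriptor (2048 bytes here, the smaller of the sizes mentioned). */
    int main(void)
    {
            unsigned int ndesc = 256;      /* RxDescriptors default      */
            unsigned int desc_bytes = 16;  /* size of one descriptor     */
            unsigned int buf_bytes = 2048; /* smallest receive buffer    */

            printf("descriptor ring: %u bytes\n", ndesc * desc_bytes);
            printf("receive buffers: %u bytes\n", ndesc * buf_bytes);
            return 0;
    }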
......@@ -251,17 +252,16 @@ For copper-based boards, the keywords interact as follows:
also be forced.
The AutoNeg parameter is used when more control is required over the auto-
negotiation process. When this parameter is used, Speed and Duplex must not
be specified. This parameter is a bitmap that specifies which speed and
duplex settings are advertised to the link partner.
negotiation process. When this parameter is used, Speed and Duplex parameters
must not be specified. The following table describes supported values for the
AutoNeg parameter:
Bit                  7     6     5     4     3     2     1     0
Speed (Mbps)         N/A   N/A   1000  N/A   100   100   10    10
Duplex                           Full        Full  Half  Full  Half
Speed (Mbps)         1000   100    100    10     10
Duplex               Full   Full   Half   Full   Half
Value (in base 16)   0x20   0x08   0x04   0x02   0x01
For example, to limit the negotiated speed/duplex on the interface to 10 Mbps
Half or Full duplex, set AutoNeg to 0x02:
insmod e1000 AutoNeg=0x02
Example: insmod e1000 AutoNeg=0x03 loads e1000 and specifies 10 Mbps full
duplex and 10 Mbps half duplex for negotiation with the peer.
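To make the bitmap arithmetic concrete, here is a minimal standalone sketch; the mask names are invented for illustration and are not module parameters or driver identifiers, only the numeric values come from the table above:
    #include <stdio.h>

    /* Illustrative masks mirroring the documented AutoNeg bit values. */
    #define ADV_10_HALF   0x01
    #define ADV_10_FULL   0x02
    #define ADV_100_HALF  0x04
    #define ADV_100_FULL  0x08
    #define ADV_1000_FULL 0x20

    int main(void)
    {
            /* AutoNeg=0x03 advertises 10 Mbps half and full duplex,
             * matching the insmod example above. */
            unsigned int autoneg = ADV_10_HALF | ADV_10_FULL;

            printf("AutoNeg=0x%02X\n", autoneg);
            return 0;
    }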
Note that setting AutoNeg does not guarantee that the board will link at the
highest specified speed or duplex mode, but the board will link at the
......@@ -333,11 +333,7 @@ Additional Configurations
version 1.6 or later is required for this functionality.
The latest release of ethtool can be found from
http://sf.net/projects/gkernel. After ethtool is installed,
ethtool-copy.h must be copied and renamed to ethtool.h in your kernel
source tree at <linux_kernel_src>/include/linux. Backup the original
ethtool.h as needed before copying. The driver then must be recompiled
in order to take advantage of the latest ethtool features.
http://sf.net/projects/gkernel.
NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
for a more complete ethtool feature set can be enabled by upgrading
......
......@@ -209,6 +209,9 @@ static int el3_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
static int el3_device_remove (struct device *device);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void el3_poll_controller(struct net_device *dev);
#endif
#ifdef CONFIG_EISA
struct eisa_device_id el3_eisa_ids[] = {
......@@ -321,6 +324,9 @@ static int __init el3_common_init(struct net_device *dev)
dev->set_multicast_list = &set_multicast_list;
dev->tx_timeout = el3_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = el3_poll_controller;
#endif
SET_ETHTOOL_OPS(dev, &ethtool_ops);
err = register_netdev(dev);
......@@ -999,6 +1005,19 @@ el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void el3_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
el3_interrupt(dev->irq, dev, NULL);
enable_irq(dev->irq);
}
#endif
static struct net_device_stats *
el3_get_stats(struct net_device *dev)
{
......
......@@ -398,6 +398,8 @@ static void cp_clean_rings (struct cp_private *cp);
static struct pci_device_id cp_pci_tbl[] = {
{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
{ PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
......
......@@ -155,7 +155,7 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.2.3-k2"DRV_EXT
#define DRV_VERSION "3.3.6-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
#define PFX DRV_NAME ": "
......@@ -201,6 +201,7 @@ static struct pci_device_id e100_id_table[] = {
INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
......@@ -209,7 +210,6 @@ static struct pci_device_id e100_id_table[] = {
INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
......@@ -621,8 +621,7 @@ static int e100_self_test(struct nic *nic)
writel(selftest | dma_addr, &nic->csr->port);
e100_write_flush(nic);
/* Wait 10 msec for self-test to complete */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 100 + 1);
msleep(10);
/* Interrupts are enabled after self-test */
e100_disable_irq(nic);
......@@ -670,8 +669,7 @@ static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
e100_write_flush(nic); udelay(4);
}
/* Wait 10 msec for cmd to complete */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 100 + 1);
msleep(10);
/* Chip deselect */
writeb(0, &nic->csr->eeprom_ctrl_lo);
......@@ -1760,8 +1758,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
memset(skb->data, 0xFF, ETH_DATA_LEN);
e100_xmit_frame(skb, nic->netdev);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 100 + 1);
msleep(10);
if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
skb->data, ETH_DATA_LEN))
......@@ -1847,8 +1844,7 @@ static void e100_get_regs(struct net_device *netdev,
mdio_read(netdev, nic->mii.phy_id, i);
memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
e100_exec_cb(nic, NULL, e100_dump);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ / 100 + 1);
msleep(10);
memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
sizeof(nic->mem->dump_buf));
}
......@@ -2027,8 +2023,7 @@ static int e100_phys_id(struct net_device *netdev, u32 data)
if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
mod_timer(&nic->blink_timer, jiffies);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(data * HZ);
msleep_interruptible(data * 1000);
del_timer_sync(&nic->blink_timer);
mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
......
......@@ -776,7 +776,7 @@ static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
{
struct net_device *netdev = adapter->netdev;
uint32_t icr, mask, i=0, shared_int = TRUE;
uint32_t mask, i=0, shared_int = TRUE;
uint32_t irq = adapter->pdev->irq;
*data = 0;
......@@ -784,7 +784,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Hook up test interrupt handler just for this test */
if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
shared_int = FALSE;
} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, netdev->name, netdev)){
} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
*data = 1;
return -1;
}
......@@ -793,21 +794,6 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
msec_delay(10);
/* Interrupts are disabled, so read interrupt cause
* register (icr) twice to verify that there are no interrupts
* pending. icr is clear on read.
*/
icr = E1000_READ_REG(&adapter->hw, ICR);
icr = E1000_READ_REG(&adapter->hw, ICR);
if(icr != 0) {
/* if icr is non-zero, there is no point
* running other interrupt tests.
*/
*data = 2;
i = 10;
}
/* Test each interrupt */
for(; i < 10; i++) {
......@@ -856,8 +842,10 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
* test failed.
*/
adapter->test_icr = 0;
E1000_WRITE_REG(&adapter->hw, IMC, ~mask);
E1000_WRITE_REG(&adapter->hw, ICS, ~mask);
E1000_WRITE_REG(&adapter->hw, IMC,
(~mask & 0x00007FFF));
E1000_WRITE_REG(&adapter->hw, ICS,
(~mask & 0x00007FFF));
msec_delay(10);
if(adapter->test_icr) {
......@@ -1336,10 +1324,17 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
msec_delay(200);
pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[0].dma,
rxdr->buffer_info[0].length, PCI_DMA_FROMDEVICE);
i = 0;
do {
pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
PCI_DMA_FROMDEVICE);
if (!e1000_check_lbtest_frame(rxdr->buffer_info[i++].skb, 1024))
return 0;
} while (i < 64);
return e1000_check_lbtest_frame(rxdr->buffer_info[0].skb, 1024);
return 13;
}
static int
......@@ -1358,10 +1353,27 @@ static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
{
*data = 0;
e1000_check_for_link(&adapter->hw);
if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
*data = 1;
if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
int i = 0;
adapter->hw.serdes_link_down = TRUE;
/* on some blade server designs, link establishment
 * could take as long as 2-3 minutes. */
do {
e1000_check_for_link(&adapter->hw);
if (adapter->hw.serdes_link_down == FALSE)
return *data;
msec_delay(20);
} while (i++ < 3750);
*data = 1;
} else {
e1000_check_for_link(&adapter->hw);
if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
*data = 1;
}
}
return *data;
}
......@@ -1490,6 +1502,8 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
case E1000_DEV_ID_82543GC_COPPER:
case E1000_DEV_ID_82544EI_FIBER:
case E1000_DEV_ID_82546EB_QUAD_COPPER:
case E1000_DEV_ID_82545EM_FIBER:
case E1000_DEV_ID_82545EM_COPPER:
return wol->wolopts ? -EOPNOTSUPP : 0;
case E1000_DEV_ID_82546EB_FIBER:
......@@ -1554,9 +1568,7 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
e1000_setup_led(&adapter->hw);
mod_timer(&adapter->blink_timer, jiffies);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(data * HZ);
msleep_interruptible(data * 1000);
del_timer_sync(&adapter->blink_timer);
e1000_led_off(&adapter->hw);
clear_bit(E1000_LED_ON, &adapter->led_status);
......
......@@ -123,16 +123,31 @@ e1000_set_phy_type(struct e1000_hw *hw)
static void
e1000_phy_init_script(struct e1000_hw *hw)
{
uint32_t ret_val;
uint16_t phy_saved_data;
DEBUGFUNC("e1000_phy_init_script");
if(hw->phy_init_script) {
msec_delay(20);
/* Save off the current value of register 0x2F5B to be restored at
* the end of this routine. */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disable the PHY transmitter */
e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
msec_delay(20);
e1000_write_phy_reg(hw,0x0000,0x0140);
msec_delay(5);
if(hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547) {
switch(hw->mac_type) {
case e1000_82541:
case e1000_82547:
e1000_write_phy_reg(hw, 0x1F95, 0x0001);
e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
......@@ -150,12 +165,23 @@ e1000_phy_init_script(struct e1000_hw *hw)
e1000_write_phy_reg(hw, 0x1F96, 0x003F);
e1000_write_phy_reg(hw, 0x2010, 0x0008);
} else {
break;
case e1000_82541_rev_2:
case e1000_82547_rev_2:
e1000_write_phy_reg(hw, 0x1F73, 0x0099);
break;
default:
break;
}
e1000_write_phy_reg(hw, 0x0000, 0x3300);
msec_delay(20);
/* Now enable the transmitter */
e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
if(hw->mac_type == e1000_82547) {
uint16_t fused, fine, coarse;
......@@ -244,6 +270,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82546GB_COPPER:
case E1000_DEV_ID_82546GB_FIBER:
case E1000_DEV_ID_82546GB_SERDES:
case E1000_DEV_ID_82546GB_PCIE:
hw->mac_type = e1000_82546_rev_3;
break;
case E1000_DEV_ID_82541EI:
......@@ -967,7 +994,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
hw->dsp_config_state = e1000_dsp_config_disabled;
/* Force MDI for IGP B-0 PHY */
/* Force MDI for earlier revs of the IGP PHY */
phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX |
IGP01E1000_PSCR_FORCE_MDI_MDIX);
hw->mdix = 1;
......@@ -2111,7 +2138,7 @@ e1000_check_for_link(struct e1000_hw *hw)
* at gigabit speed, then TBI compatibility is not needed. If we are
* at gigabit speed, we turn on TBI compatibility.
*/
if(hw->tbi_compatibility_en) {
if(hw->tbi_compatibility_en) {
uint16_t speed, duplex;
e1000_get_speed_and_duplex(hw, &speed, &duplex);
if(speed != SPEED_1000) {
......@@ -2466,12 +2493,14 @@ e1000_read_phy_reg(struct e1000_hw *hw,
DEBUGFUNC("e1000_read_phy_reg");
if(hw->phy_type == e1000_phy_igp &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(uint16_t)reg_addr);
if(ret_val)
if(ret_val) {
return ret_val;
}
}
ret_val = e1000_read_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT & reg_addr,
......@@ -2570,12 +2599,14 @@ e1000_write_phy_reg(struct e1000_hw *hw,
DEBUGFUNC("e1000_write_phy_reg");
if(hw->phy_type == e1000_phy_igp &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(uint16_t)reg_addr);
if(ret_val)
if(ret_val) {
return ret_val;
}
}
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT & reg_addr,
......@@ -3478,7 +3509,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
/* A check for invalid values: offset too large, too many words, and not
* enough words.
*/
if((offset > eeprom->word_size) || (words > eeprom->word_size - offset) ||
if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
(words == 0)) {
DEBUGOUT("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
......@@ -3626,7 +3657,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
/* A check for invalid values: offset too large, too many words, and not
* enough words.
*/
if((offset > eeprom->word_size) || (words > eeprom->word_size - offset) ||
if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
(words == 0)) {
DEBUGOUT("\"words\" parameter out of bounds\n");
return -E1000_ERR_EEPROM;
......@@ -4918,7 +4949,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
boolean_t link_up)
{
int32_t ret_val;
uint16_t phy_data, speed, duplex, i;
uint16_t phy_data, phy_saved_data, speed, duplex, i;
uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
{IGP01E1000_PHY_AGC_PARAM_A,
IGP01E1000_PHY_AGC_PARAM_B,
......@@ -4999,6 +5030,21 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
}
} else {
if(hw->dsp_config_state == e1000_dsp_config_activated) {
/* Save off the current value of register 0x2F5B to be restored at
* the end of the routines. */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
if(ret_val)
return ret_val;
/* Disable the PHY transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
if(ret_val)
return ret_val;
msec_delay(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA);
if(ret_val)
......@@ -5021,10 +5067,33 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
if(ret_val)
return ret_val;
msec_delay(20);
/* Now enable the transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
if(ret_val)
return ret_val;
hw->dsp_config_state = e1000_dsp_config_enabled;
}
if(hw->ffe_config_state == e1000_ffe_config_active) {
/* Save off the current value of register 0x2F5B to be restored at
* the end of the routines. */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
if(ret_val)
return ret_val;
/* Disable the PHY transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
if(ret_val)
return ret_val;
msec_delay(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
IGP01E1000_IEEE_FORCE_GIGA);
if(ret_val)
......@@ -5038,6 +5107,15 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
IGP01E1000_IEEE_RESTART_AUTONEG);
if(ret_val)
return ret_val;
msec_delay(20);
/* Now enable the transmitter */
ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
if(ret_val)
return ret_val;
hw->ffe_config_state = e1000_ffe_config_enabled;
}
}
......@@ -5126,14 +5204,29 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
* Dx states where the power conservation is most important. During
* driver activity we should enable SmartSpeed, so performance is
* maintained. */
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
if(ret_val)
return ret_val;
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&phy_data);
if(ret_val)
return ret_val;
phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
if(ret_val)
return ret_val;
phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
phy_data);
if(ret_val)
return ret_val;
} else if (hw->smart_speed == e1000_smart_speed_off) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&phy_data);
if (ret_val)
return ret_val;
phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
phy_data);
if(ret_val)
return ret_val;
}
} else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
(hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
......
......@@ -167,6 +167,12 @@ typedef enum {
e1000_downshift_undefined = 0xFF
} e1000_downshift;
typedef enum {
e1000_smart_speed_default = 0,
e1000_smart_speed_on,
e1000_smart_speed_off
} e1000_smart_speed;
typedef enum {
e1000_polarity_reversal_enabled = 0,
e1000_polarity_reversal_disabled,
......@@ -361,6 +367,7 @@ int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
#define E1000_DEV_ID_82546GB_COPPER 0x1079
#define E1000_DEV_ID_82546GB_FIBER 0x107A
#define E1000_DEV_ID_82546GB_SERDES 0x107B
#define E1000_DEV_ID_82546GB_PCIE 0x108A
#define E1000_DEV_ID_82547EI 0x1019
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
......@@ -1026,6 +1033,7 @@ struct e1000_hw {
uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
boolean_t disable_polarity_correction;
boolean_t speed_downgraded;
e1000_smart_speed smart_speed;
e1000_dsp_config dsp_config_state;
boolean_t get_link_status;
boolean_t serdes_link_down;
......
......@@ -35,10 +35,19 @@
* - More errlogging support from Jon Mason <jonmason@us.ibm.com>
* - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
*
* 5.3.11 6/4/04
* - ethtool register dump reads MANC register conditionally.
*
* 5.3.10 6/1/04
* 5.6.5 11/01/04
* - Enabling NETIF_F_SG without checksum offload is illegal -
John Mason <jdmason@us.ibm.com>
* 5.6.3 10/26/04
* - Remove redundant initialization - Jamal Hadi
* - Reset buffer_info->dma in tx resource cleanup logic
* 5.6.2 10/12/04
* - Avoid filling tx_ring completely - shemminger@osdl.org
* - Replace schedule_timeout() with msleep()/msleep_interruptible() -
* nacc@us.ibm.com
* - Sparse cleanup - shemminger@osdl.org
* - Fix tx resource cleanup logic
* - LLTX support - ak@suse.de and hadi@cyberus.ca
*/
char e1000_driver_name[] = "e1000";
......@@ -48,7 +57,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
char e1000_driver_version[] = "5.5.4-k2"DRIVERNAPI;
char e1000_driver_version[] = "5.6.10.1-k2"DRIVERNAPI;
char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
......@@ -90,6 +99,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x107A),
INTEL_E1000_ETHERNET_DEVICE(0x107B),
INTEL_E1000_ETHERNET_DEVICE(0x107C),
INTEL_E1000_ETHERNET_DEVICE(0x108A),
/* required last entry */
{0,}
};
......@@ -128,8 +138,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static void e1000_irq_disable(struct e1000_adapter *adapter);
static void e1000_irq_enable(struct e1000_adapter *adapter);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_NAPI
......@@ -146,9 +154,6 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
void set_ethtool_ops(struct net_device *netdev);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_rx_checksum(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc,
struct sk_buff *skb);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
......@@ -242,6 +247,33 @@ e1000_exit_module(void)
module_exit(e1000_exit_module);
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(&adapter->hw, IMC, ~0);
E1000_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
/**
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
}
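For readers following the locking here, a tiny userspace model (an illustration only, not driver code) of how the irq_sem count above makes disable/enable calls nest:
    #include <stdio.h>

    /* Model of the irq_sem reference count: disable increments, enable
     * decrements, and interrupts are only unmasked when the count
     * returns to zero. */
    static int irq_sem;
    static int masked;

    static void model_irq_disable(void) { irq_sem++; masked = 1; }
    static void model_irq_enable(void)  { if (--irq_sem == 0) masked = 0; }

    int main(void)
    {
            model_irq_disable();
            model_irq_disable();
            model_irq_enable();
            printf("after one enable: masked=%d\n", masked);  /* still 1 */
            model_irq_enable();
            printf("after two enables: masked=%d\n", masked); /* now 0   */
            return 0;
    }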
int
e1000_up(struct e1000_adapter *adapter)
......@@ -475,8 +507,6 @@ e1000_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
} else {
netdev->features = NETIF_F_SG;
}
#ifdef NETIF_F_TSO
......@@ -1061,6 +1091,24 @@ e1000_free_tx_resources(struct e1000_adapter *adapter)
adapter->tx_ring.desc = NULL;
}
static inline void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
struct pci_dev *pdev = adapter->pdev;
if(buffer_info->dma) {
pci_unmap_page(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
buffer_info->dma = 0;
}
if(buffer_info->skb) {
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
}
/**
* e1000_clean_tx_ring - Free Tx Buffers
* @adapter: board private structure
......@@ -1071,7 +1119,6 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct e1000_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
......@@ -1079,17 +1126,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
for(i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
if(buffer_info->skb) {
pci_unmap_page(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
}
size = sizeof(struct e1000_buffer) * tx_ring->count;
......@@ -1762,7 +1799,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int mss = 0;
int count = 0;
unsigned int f;
nr_frags = skb_shinfo(skb)->nr_frags;
len -= skb->data_len;
if(unlikely(skb->len <= 0)) {
......@@ -1811,7 +1847,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
......@@ -1844,6 +1880,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
/* Make sure there is space in the ring for the next send. */
if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
......@@ -1904,9 +1944,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
return -EINVAL;
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
return -EINVAL;
}
if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
......@@ -2073,34 +2113,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(&adapter->hw, IMC, ~0);
E1000_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
/**
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
}
/**
* e1000_intr - Interrupt Handler
* @irq: interrupt number
......@@ -2162,6 +2174,9 @@ e1000_clean(struct net_device *netdev, int *budget)
int tx_cleaned;
int work_done = 0;
if (!netif_carrier_ok(netdev))
goto quit_polling;
tx_cleaned = e1000_clean_tx_irq(adapter);
e1000_clean_rx_irq(adapter, &work_done, work_to_do);
......@@ -2171,7 +2186,7 @@ e1000_clean(struct net_device *netdev, int *budget)
/* if no Rx and Tx cleanup work was done, exit the polling mode */
if(!tx_cleaned || (work_done < work_to_do) ||
!netif_running(netdev)) {
netif_rx_complete(netdev);
quit_polling: netif_rx_complete(netdev);
e1000_irq_enable(adapter);
return 0;
}
......@@ -2190,7 +2205,6 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct e1000_buffer *buffer_info;
unsigned int i, eop;
......@@ -2205,19 +2219,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
if(likely(buffer_info->dma)) {
pci_unmap_page(pdev,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
buffer_info->dma = 0;
}
if(buffer_info->skb) {
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
tx_desc->buffer_addr = 0;
tx_desc->lower.data = 0;
tx_desc->upper.data = 0;
......@@ -2243,6 +2245,41 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
return cleaned;
}
/**
* e1000_rx_checksum - Receive Checksum Offload for 82543
* @adapter: board private structure
* @rx_desc: receive descriptor
* @sk_buff: socket buffer with received data
**/
static inline void
e1000_rx_checksum(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc,
struct sk_buff *skb)
{
/* 82543 or newer only */
if(unlikely((adapter->hw.mac_type < e1000_82543) ||
/* Ignore Checksum bit is set */
(rx_desc->status & E1000_RXD_STAT_IXSM) ||
/* TCP Checksum has not been calculated */
(!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
/* At this point we know the hardware did the TCP checksum */
/* now look at the TCP checksum error bit */
if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
/* let the stack verify checksum errors */
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_err++;
} else {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
}
/**
* e1000_clean_rx_irq - Send received data up the network stack
* @adapter: board private structure
......@@ -2291,7 +2328,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
/* All receives must fit into a single buffer */
E1000_DBG("%s: Receive packet consumed multiple"
" buffers\n", netdev->name);
" buffers\n", netdev->name);
dev_kfree_skb_irq(skb);
goto next_desc;
}
......@@ -2376,8 +2413,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
while(!buffer_info->skb) {
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
if(unlikely(!skb)) {
/* Better luck next round */
break;
......@@ -2587,41 +2624,6 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return E1000_SUCCESS;
}
/**
* e1000_rx_checksum - Receive Checksum Offload for 82543
* @adapter: board private structure
* @rx_desc: receive descriptor
* @sk_buff: socket buffer with received data
**/
static void
e1000_rx_checksum(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc,
struct sk_buff *skb)
{
/* 82543 or newer only */
if(unlikely((adapter->hw.mac_type < e1000_82543) ||
/* Ignore Checksum bit is set */
(rx_desc->status & E1000_RXD_STAT_IXSM) ||
/* TCP Checksum has not been calculated */
(!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
/* At this point we know the hardware did the TCP checksum */
/* now look at the TCP checksum error bit */
if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
/* let the stack verify checksum errors */
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_err++;
} else {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
}
void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
......
......@@ -42,13 +42,8 @@
#include <linux/sched.h>
#ifndef msec_delay
#define msec_delay(x) do { if(in_interrupt()) { \
/* Don't mdelay in interrupt context! */ \
BUG(); \
} else { \
set_current_state(TASK_UNINTERRUPTIBLE); \
schedule_timeout((x * HZ)/1000 + 2); \
} } while(0)
#define msec_delay(x) msleep(x)
/* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
* but cannot process phy register reads/writes faster than millisecond
......
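The comment above motivates millisecond delays that also work in interrupt context; a minimal sketch of such a companion macro follows (the name is an assumption for illustration and is not taken from this hunk). Code that may run with interrupts disabled cannot sleep, so it must busy-wait instead of calling msleep():
    /* Hypothetical companion macro (name assumed for illustration only):
     * a busy-wait millisecond delay for paths that may execute in
     * interrupt context, where msleep() must not be used. */
    #ifndef msec_delay_irq
    #define msec_delay_irq(x) mdelay(x)
    #endif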
......@@ -47,7 +47,7 @@
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static int num_##X = 0; \
module_param_array(X, int, &num_##X, 0); \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
/* Transmit Descriptor Count
......@@ -470,9 +470,6 @@ e1000_check_options(struct e1000_adapter *adapter)
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
switch(adapter->itr) {
case -1:
adapter->itr = 1;
break;
case 0:
DPRINTK(PROBE, INFO, "%s turned off\n",
opt.name);
......@@ -481,13 +478,14 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
opt.name);
break;
case -1:
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
break;
}
} else {
adapter->itr = 1;
adapter->itr = opt.def;
}
}
......
......@@ -79,6 +79,8 @@
* 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
* into nv_close, otherwise reenabling for wol can
* cause DMA to kfree'd memory.
* 0.31: 14 Nov 2004: ethtool support for getting/setting link
* capabilities.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
......@@ -90,7 +92,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
#define FORCEDETH_VERSION "0.30"
#define FORCEDETH_VERSION "0.31"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
......@@ -210,6 +212,7 @@ enum {
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
NvRegUnknownSetupReg3 = 0x13c,
......@@ -441,6 +444,8 @@ struct fe_priv {
int in_shutdown;
u32 linkspeed;
int duplex;
int autoneg;
int fixed_mode;
int phyaddr;
int wolenabled;
unsigned int phy_oui;
......@@ -765,50 +770,6 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
return &np->stats;
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = get_nvpriv(dev);
strcpy(info->driver, "forcedeth");
strcpy(info->version, FORCEDETH_VERSION);
strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = get_nvpriv(dev);
wolinfo->supported = WAKE_MAGIC;
spin_lock_irq(&np->lock);
if (np->wolenabled)
wolinfo->wolopts = WAKE_MAGIC;
spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
spin_lock_irq(&np->lock);
if (wolinfo->wolopts == 0) {
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
}
if (wolinfo->wolopts & WAKE_MAGIC) {
writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
np->wolenabled = 1;
}
spin_unlock_irq(&np->lock);
return 0;
}
static struct ethtool_ops ops = {
.get_drvinfo = nv_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_wol = nv_get_wol,
.set_wol = nv_set_wol,
};
/*
* nv_alloc_rx: fill rx ring entries.
* Return 1 if the allocations for the skbs failed and the
......@@ -1286,6 +1247,25 @@ static int nv_update_linkspeed(struct net_device *dev)
goto set_speed;
}
if (np->autoneg == 0) {
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
dev->name, np->fixed_mode);
if (np->fixed_mode & LPA_100FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 1;
} else if (np->fixed_mode & LPA_100HALF) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
newdup = 0;
} else if (np->fixed_mode & LPA_10FULL) {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 1;
} else {
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
newdup = 0;
}
retval = 1;
goto set_speed;
}
/* check auto negotiation is complete */
if (!(mii_status & BMSR_ANEGCOMPLETE)) {
/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
......@@ -1303,7 +1283,7 @@ static int nv_update_linkspeed(struct net_device *dev)
if ((control_1000 & ADVERTISE_1000FULL) &&
(status_1000 & LPA_1000FULL)) {
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
dev->name);
newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
newdup = 1;
......@@ -1362,9 +1342,9 @@ static int nv_update_linkspeed(struct net_device *dev)
phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
if (np->duplex == 0)
phyreg |= PHY_HALF;
if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
phyreg |= PHY_100;
else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
phyreg |= PHY_1000;
writel(phyreg, base + NvRegPhyInterface);
......@@ -1500,6 +1480,227 @@ static void nv_do_nic_poll(unsigned long data)
enable_irq(dev->irq);
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = get_nvpriv(dev);
strcpy(info->driver, "forcedeth");
strcpy(info->version, FORCEDETH_VERSION);
strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = get_nvpriv(dev);
wolinfo->supported = WAKE_MAGIC;
spin_lock_irq(&np->lock);
if (np->wolenabled)
wolinfo->wolopts = WAKE_MAGIC;
spin_unlock_irq(&np->lock);
}
static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
spin_lock_irq(&np->lock);
if (wolinfo->wolopts == 0) {
writel(0, base + NvRegWakeUpFlags);
np->wolenabled = 0;
}
if (wolinfo->wolopts & WAKE_MAGIC) {
writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
np->wolenabled = 1;
}
spin_unlock_irq(&np->lock);
return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct fe_priv *np = netdev_priv(dev);
int adv;
spin_lock_irq(&np->lock);
ecmd->port = PORT_MII;
if (!netif_running(dev)) {
/* We do not track link speed / duplex setting if the
* interface is disabled. Force a link check */
nv_update_linkspeed(dev);
}
switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
ecmd->speed = SPEED_10;
break;
case NVREG_LINKSPEED_100:
ecmd->speed = SPEED_100;
break;
case NVREG_LINKSPEED_1000:
ecmd->speed = SPEED_1000;
break;
}
ecmd->duplex = DUPLEX_HALF;
if (np->duplex)
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = np->autoneg;
ecmd->advertising = ADVERTISED_MII;
if (np->autoneg) {
ecmd->advertising |= ADVERTISED_Autoneg;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
} else {
adv = np->fixed_mode;
}
if (adv & ADVERTISE_10HALF)
ecmd->advertising |= ADVERTISED_10baseT_Half;
if (adv & ADVERTISE_10FULL)
ecmd->advertising |= ADVERTISED_10baseT_Full;
if (adv & ADVERTISE_100HALF)
ecmd->advertising |= ADVERTISED_100baseT_Half;
if (adv & ADVERTISE_100FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
if (np->autoneg && np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
if (adv & ADVERTISE_1000FULL)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
}
ecmd->supported = (SUPPORTED_Autoneg |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_MII);
if (np->gigabit == PHY_GIGABIT)
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->phy_address = np->phyaddr;
ecmd->transceiver = XCVR_EXTERNAL;
/* ignore maxtxpkt, maxrxpkt for now */
spin_unlock_irq(&np->lock);
return 0;
}
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct fe_priv *np = netdev_priv(dev);
if (ecmd->port != PORT_MII)
return -EINVAL;
if (ecmd->transceiver != XCVR_EXTERNAL)
return -EINVAL;
if (ecmd->phy_address != np->phyaddr) {
/* TODO: support switching between multiple phys. Should be
* trivial, but not enabled due to lack of test hardware. */
return -EINVAL;
}
if (ecmd->autoneg == AUTONEG_ENABLE) {
u32 mask;
mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
if (np->gigabit == PHY_GIGABIT)
mask |= ADVERTISED_1000baseT_Full;
if ((ecmd->advertising & mask) == 0)
return -EINVAL;
} else if (ecmd->autoneg == AUTONEG_DISABLE) {
/* Note: autonegotiation disable, speed 1000 intentionally
* forbidden - no one should need that. */
if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
} else {
return -EINVAL;
}
spin_lock_irq(&np->lock);
if (ecmd->autoneg == AUTONEG_ENABLE) {
int adv, bmcr;
np->autoneg = 1;
/* advertise only what has been requested */
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
if (ecmd->advertising & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (ecmd->advertising & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
adv &= ~ADVERTISE_1000FULL;
if (ecmd->advertising & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
} else {
int adv, bmcr;
np->autoneg = 0;
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
adv |= ADVERTISE_10FULL;
if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_100HALF;
if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
adv |= ADVERTISE_100FULL;
mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
np->fixed_mode = adv;
if (np->gigabit == PHY_GIGABIT) {
adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
adv &= ~ADVERTISE_1000FULL;
mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
}
bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
bmcr |= BMCR_FULLDPLX;
if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
bmcr |= BMCR_SPEED100;
mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
if (netif_running(dev)) {
/* Wait a bit and then reconfigure the nic. */
udelay(10);
nv_linkchange(dev);
}
}
spin_unlock_irq(&np->lock);
return 0;
}
static struct ethtool_ops ops = {
.get_drvinfo = nv_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_wol = nv_get_wol,
.set_wol = nv_set_wol,
.get_settings = nv_get_settings,
.set_settings = nv_set_settings,
};
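For context, the get_settings/set_settings handlers wired up above are what the userspace ethtool utility reaches through the standard SIOCETHTOOL ioctl; a minimal userspace sketch (illustrative only, not part of this patch, with "eth0" as a placeholder interface name):
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* Ask the kernel for link settings on "eth0"; for this driver the
     * request lands in nv_get_settings() above. */
    int main(void)
    {
            struct ifreq ifr;
            struct ethtool_cmd ecmd;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            memset(&ecmd, 0, sizeof(ecmd));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ecmd.cmd = ETHTOOL_GSET;
            ifr.ifr_data = (void *)&ecmd;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("speed %u, duplex %u, autoneg %u\n",
                           (unsigned)ecmd.speed, (unsigned)ecmd.duplex,
                           (unsigned)ecmd.autoneg);
            close(fd);
            return 0;
    }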
static int nv_open(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
......@@ -1550,9 +1751,6 @@ static int nv_open(struct net_device *dev)
base + NvRegRingSizes);
/* 5) continue setup */
np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
np->duplex = 0;
writel(np->linkspeed, base + NvRegLinkSpeed);
writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
writel(np->desc_ver, base + NvRegTxRxControl);
......@@ -1866,6 +2064,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
phy_init(dev);
}
/* set default link speed settings */
np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
np->duplex = 0;
np->autoneg = 1;
err = register_netdev(dev);
if (err) {
printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
......
......@@ -63,7 +63,6 @@ VERSION 1.6LK <2004/04/14>
#define RTL8169_VERSION "1.6LK"
#define MODULENAME "r8169"
#define RTL8169_DRIVER_NAME MODULENAME " Gigabit Ethernet driver " RTL8169_VERSION
#define PFX MODULENAME ": "
#ifdef RTL8169_DEBUG
......@@ -112,7 +111,8 @@ static int multicast_filter_limit = 32;
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
#define RxPacketMaxSize 0x0800 /* Maximum size supported is 16K-1 */
#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
#define R8169_REGS_SIZE 256
......@@ -427,6 +427,9 @@ static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *netdev);
static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
void __iomem *);
static int rtl8169_change_mtu(struct net_device *netdev, int new_mtu);
static void rtl8169_down(struct net_device *dev);
#ifdef CONFIG_R8169_NAPI
static int rtl8169_poll(struct net_device *dev, int *budget);
#endif
......@@ -560,8 +563,8 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
strcpy(info->driver, RTL8169_DRIVER_NAME);
strcpy(info->version, RTL8169_VERSION );
strcpy(info->driver, MODULENAME);
strcpy(info->version, RTL8169_VERSION);
strcpy(info->bus_info, pci_name(tp->pci_dev));
}
......@@ -1238,8 +1241,6 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
tp->chipset = i;
tp->rx_buf_sz = RX_BUF_SIZE;
*ioaddr_out = ioaddr;
*dev_out = dev;
out:
......@@ -1280,7 +1281,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_idx++;
if (!printed_version) {
printk(KERN_INFO RTL8169_DRIVER_NAME " loaded\n");
printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
MODULENAME, RTL8169_VERSION);
printed_version = 1;
}
......@@ -1321,6 +1323,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
dev->change_mtu = rtl8169_change_mtu;
#ifdef CONFIG_R8169_NAPI
dev->poll = rtl8169_poll;
......@@ -1449,13 +1452,22 @@ static int rtl8169_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
static int
rtl8169_open(struct net_device *dev)
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
struct net_device *dev)
{
unsigned int mtu = dev->mtu;
tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
}
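A worked sketch of the sizing rule introduced above; RX_BUF_SIZE's actual value is not visible in this hunk, so it is treated as a parameter here (1536 is only an assumed default for illustration):
    #include <stdio.h>

    /* Mirrors the rule in rtl8169_set_rxbufsize(): once the MTU exceeds
     * the default buffer size, the buffer grows to MTU + Ethernet header
     * (14 bytes) + 8. */
    static unsigned int rx_bufsize(unsigned int mtu, unsigned int rx_buf_size)
    {
            const unsigned int eth_hlen = 14;

            return (mtu > rx_buf_size) ? mtu + eth_hlen + 8 : rx_buf_size;
    }

    int main(void)
    {
            printf("mtu 1500 -> %u bytes\n", rx_bufsize(1500, 1536));
            printf("mtu 7000 -> %u bytes\n", rx_bufsize(7000, 1536));
            return 0;
    }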
static int rtl8169_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
int retval;
rtl8169_set_rxbufsize(tp, dev);
retval =
request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
if (retval < 0)
......@@ -1535,8 +1547,8 @@ rtl8169_hw_start(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W8(EarlyTxThres, EarlyTxThld);
// For gigabit rtl8169
RTL_W16(RxMaxSize, RxPacketMaxSize);
// For gigabit rtl8169, MTU + header + CRC + VLAN
RTL_W16(RxMaxSize, tp->rx_buf_sz);
// Set Rx Config register
i = rtl8169_rx_config |
......@@ -1577,6 +1589,37 @@ rtl8169_hw_start(struct net_device *dev)
netif_start_queue(dev);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
int ret = 0;
if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
return -EINVAL;
dev->mtu = new_mtu;
if (!netif_running(dev))
goto out;
rtl8169_down(dev);
rtl8169_set_rxbufsize(tp, dev);
ret = rtl8169_init_ring(dev);
if (ret < 0)
goto out;
rtl8169_hw_start(dev);
netif_poll_enable(dev);
rtl8169_request_timer(dev);
out:
return ret;
}
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
desc->addr = 0x0badbadbadbadbadull;
......@@ -1743,10 +1786,19 @@ static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
synchronize_irq(dev->irq);
/* Wait for any pending NAPI task to complete */
netif_poll_disable(dev);
RTL_W16(IntrMask, 0x0000);
RTL_W16(IntrStatus, 0xffff);
netif_poll_enable(dev);
}
static void rtl8169_reinit_task(void *_data)
......@@ -1970,7 +2022,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && (tp->dirty_rx == tp->cur_rx == 0)) {
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", dev->name);
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
......@@ -2256,19 +2308,17 @@ static int rtl8169_poll(struct net_device *dev, int *budget)
}
#endif
static int
rtl8169_close(struct net_device *dev)
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
void __iomem *ioaddr = tp->mmio_addr;
rtl8169_delete_timer(dev);
netif_stop_queue(dev);
flush_scheduled_work();
rtl8169_delete_timer(dev);
spin_lock_irq(&tp->lock);
/* Stop the chip's Tx and Rx DMA processes. */
......@@ -2283,13 +2333,27 @@ rtl8169_close(struct net_device *dev)
spin_unlock_irq(&tp->lock);
free_irq(dev->irq, dev);
synchronize_irq(dev->irq);
netif_poll_disable(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
rtl8169_tx_clear(tp);
rtl8169_rx_clear(tp);
}
static int rtl8169_close(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
rtl8169_down(dev);
free_irq(dev->irq, dev);
pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
......
......@@ -61,7 +61,7 @@ typedef enum xena_max_outstanding_splits {
#define INTR_DBG 4
/* Global variable that defines the present debug level of the driver. */
int debug_level = ERR_DBG; /* Default level. */
static int debug_level = ERR_DBG; /* Default level. */
/* DEBUG message print. */
#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
......
......@@ -81,6 +81,25 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
return retval & 0xffff;
}
if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
int value;
int i = 1000;
value = ioread32(ioaddr + CSR9);
iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
value = (phy_id << 21) | (location << 16) | 0x80000000;
iowrite32(value, ioaddr + CSR10);
while(--i > 0) {
mdio_delay();
if(ioread32(ioaddr + CSR10) & 0x10000000)
break;
}
retval = ioread32(ioaddr + CSR10);
spin_unlock_irqrestore(&tp->mii_lock, flags);
return retval & 0xFFFF;
}
/* Establish sync by sending at least 32 logic ones. */
for (i = 32; i >= 0; i--) {
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
......@@ -140,7 +159,23 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
spin_unlock_irqrestore(&tp->mii_lock, flags);
return;
}
if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
int value;
int i = 1000;
value = ioread32(ioaddr + CSR9);
iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
value = (phy_id << 21) | (location << 16) | 0x40000000 | (val & 0xFFFF);
iowrite32(value, ioaddr + CSR10);
while(--i > 0) {
if (ioread32(ioaddr + CSR10) & 0x10000000)
break;
}
spin_unlock_irqrestore(&tp->mii_lock, flags);
return;
}
/* Establish sync by sending 32 logic ones. */
for (i = 32; i >= 0; i--) {
iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
......
......@@ -39,6 +39,7 @@ void tulip_timer(unsigned long data)
case MX98713:
case COMPEX9881:
case DM910X:
case ULI526X:
default: {
struct medialeaf *mleaf;
unsigned char *p;
......
......@@ -88,6 +88,7 @@ enum chips {
I21145,
DM910X,
CONEXANT,
ULI526X
};
......@@ -481,8 +482,11 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
static inline void tulip_restart_rxtx(struct tulip_private *tp)
{
tulip_stop_rxtx(tp);
udelay(5);
if(!(tp->chip_id == ULI526X &&
(tp->revision == 0x40 || tp->revision == 0x50))) {
tulip_stop_rxtx(tp);
udelay(5);
}
tulip_start_rxtx(tp);
}
......
......@@ -88,9 +88,9 @@ static int rx_copybreak = 100;
ToDo: Non-Intel setting could be better.
*/
#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__)
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(__sparc__) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
......@@ -198,6 +198,10 @@ struct tulip_chip_table tulip_tbl[] = {
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
HAS_MII | HAS_ACPI, tulip_timer },
/* ULi526X */
{ "ULi M5261/M5263", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
};
......@@ -229,12 +233,14 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, /* ALi 1563 integrated ethernet */
{ 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
{ 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
{ } /* terminate list */
};
......@@ -515,7 +521,7 @@ static void tulip_tx_timeout(struct net_device *dev)
dev->name);
} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
|| tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
|| tp->chip_id == DM910X) {
|| tp->chip_id == DM910X || tp->chip_id == ULI526X) {
printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
"SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
......@@ -1216,6 +1222,22 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
}
#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
* is the DM910X and the on-chip ULi devices.
*/
static int tulip_uli_dm_quirk(struct pci_dev *pdev)
{
if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
return 1;
if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
return 1;
if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
return 1;
return 0;
}
static int __devinit tulip_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
......@@ -1304,17 +1326,12 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
if ((pdev->vendor == 0x1282 && pdev->device == 0x9102)
|| (pdev->vendor == 0x10b9 && pdev->device == 0x5261))
if (tulip_uli_dm_quirk(pdev)) {
csr0 &= ~0x01f100ff;
#if defined(__sparc__)
/* DM9102A needs 32-dword alignment/burst length on sparc - chip bug? */
if ((pdev->vendor == 0x1282 && pdev->device == 0x9102)
|| (pdev->vendor == 0x10b9 && pdev->device == 0x5261))
csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
}
/*
* And back to business
*/
......@@ -1659,6 +1676,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
switch (chip_idx) {
case DC21140:
case DM910X:
case ULI526X:
default:
if (tp->mtable)
iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
......
......@@ -33,6 +33,13 @@
/* A few user-configurable values. */
#define xircom_debug debug
#ifdef XIRCOM_DEBUG
static int xircom_debug = XIRCOM_DEBUG;
#else
static int xircom_debug = 1;
#endif
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 25;
......@@ -124,19 +131,11 @@ module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
static int num_units;
module_param_array(options, num_units, int, 0);
module_param_array(full_duplex, num_units, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
#define RUN_AT(x) (jiffies + (x))
#define xircom_debug debug
#ifdef XIRCOM_DEBUG
static int xircom_debug = XIRCOM_DEBUG;
#else
static int xircom_debug = 1;
#endif
/*
Theory of Operation
......
......@@ -41,6 +41,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/crc32.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......@@ -104,11 +105,42 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
static void tun_net_mclist(struct net_device *dev)
/** Add the specified Ethernet address to this multicast filter. */
static void
add_multi(u32* filter, const u8* addr)
{
/* Nothing to do for multicast filters.
* We always accept all frames. */
return;
int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
/** Remove the specified Ethernet address from this multicast filter. */
static void
del_multi(u32* filter, const u8* addr)
{
int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
filter[bit_nr >> 5] &= ~(1 << (bit_nr & 31));
}
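The two helpers above implement a 64-bin hash filter: the top six bits of the Ethernet CRC of the address select one bit in a pair of 32-bit words. A small userspace sketch of the same computation follows; it is not part of the patch, and eth_crc_be() is a bitwise CRC-32 intended to mirror the kernel's ether_crc() from <linux/crc32.h>.
/* Userspace sketch: reproduce the 64-bin multicast hash used by
 * add_multi()/del_multi().  Compile with any C compiler and run. */
#include <stdio.h>
#include <stdint.h>
#define ETH_ALEN 6
static uint32_t eth_crc_be(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int bit;
	while (--len >= 0) {
		uint8_t byte = *data++;
		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ byte) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}
int main(void)
{
	/* 01:00:5e:00:00:01 is the all-hosts IPv4 multicast group. */
	const uint8_t addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t filter[2] = { 0, 0 };
	int bit_nr = eth_crc_be(ETH_ALEN, addr) >> 26;
	filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("bit %d -> filter[%d] = 0x%08x\n",
	       bit_nr, bit_nr >> 5, (unsigned int)filter[bit_nr >> 5]);
	return 0;
}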
/** Update the list of multicast groups to which the network device belongs.
* This list is used to filter packets being sent from the character device to
* the network device. */
static void
tun_net_mclist(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
const struct dev_mc_list *mclist;
int i;
DBG(KERN_DEBUG "%s: tun_net_mclist: mc_count %d\n",
dev->name, dev->mc_count);
memset(tun->chr_filter, 0, sizeof tun->chr_filter);
for (i = 0, mclist = dev->mc_list; i < dev->mc_count && mclist != NULL;
i++, mclist = mclist->next) {
add_multi(tun->net_filter, mclist->dmi_addr);
DBG(KERN_DEBUG "%s: tun_net_mclist: %x:%x:%x:%x:%x:%x\n",
dev->name,
mclist->dmi_addr[0], mclist->dmi_addr[1], mclist->dmi_addr[2],
mclist->dmi_addr[3], mclist->dmi_addr[4], mclist->dmi_addr[5]);
}
}
static struct net_device_stats *tun_net_stats(struct net_device *dev)
......@@ -301,6 +333,10 @@ static ssize_t tun_chr_readv(struct file *file, const struct iovec *iv,
add_wait_queue(&tun->read_wait, &wait);
while (len) {
const u8 ones[ ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
u8 addr[ ETH_ALEN];
int bit_nr;
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
......@@ -320,10 +356,37 @@ static ssize_t tun_chr_readv(struct file *file, const struct iovec *iv,
}
netif_start_queue(tun->dev);
ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
kfree_skb(skb);
break;
/** Decide whether to accept this packet. This code is designed to
* behave identically to an Ethernet interface. Accept the packet if
* - we are promiscuous.
* - the packet is addressed to us.
* - the packet is broadcast.
* - the packet is multicast and
* - we are multicast promiscuous.
* - we belong to the multicast group.
*/
memcpy(addr, skb->data, min(sizeof addr, skb->len));
bit_nr = ether_crc(sizeof addr, addr) >> 26;
if ((tun->if_flags & IFF_PROMISC) ||
memcmp(addr, tun->dev_addr, sizeof addr) == 0 ||
memcmp(addr, ones, sizeof addr) == 0 ||
(((addr[0] == 1 && addr[1] == 0 && addr[2] == 0x5e) ||
(addr[0] == 0x33 && addr[1] == 0x33)) &&
((tun->if_flags & IFF_ALLMULTI) ||
(tun->chr_filter[bit_nr >> 5] & (1 << (bit_nr & 31)))))) {
DBG(KERN_DEBUG "%s: tun_chr_readv: accepted: %x:%x:%x:%x:%x:%x\n",
tun->dev->name, addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
ret = tun_put_user(tun, skb, (struct iovec *) iv, len);
kfree_skb(skb);
break;
} else {
DBG(KERN_DEBUG "%s: tun_chr_readv: rejected: %x:%x:%x:%x:%x:%x\n",
tun->dev->name, addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
kfree_skb(skb);
continue;
}
}
current->state = TASK_RUNNING;
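The condition above folds four acceptance rules into one expression. For readability, here are the same rules restated as a standalone predicate; this is a sketch only, the helper name is illustrative, and no such function exists in the patch.
/* Sketch: the acceptance rules applied by tun_chr_readv() above, written as
 * a separate predicate.  Returns nonzero if the character device should see
 * a frame whose destination address is 'dest'. */
static int tun_chr_accepts(const struct tun_struct *tun, const u8 *dest)
{
	static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int bit_nr = ether_crc(ETH_ALEN, dest) >> 26;
	if (tun->if_flags & IFF_PROMISC)
		return 1;					/* promiscuous */
	if (memcmp(dest, tun->dev_addr, ETH_ALEN) == 0)
		return 1;					/* addressed to us */
	if (memcmp(dest, bcast, ETH_ALEN) == 0)
		return 1;					/* broadcast */
	if ((dest[0] == 0x01 && dest[1] == 0x00 && dest[2] == 0x5e) ||
	    (dest[0] == 0x33 && dest[1] == 0x33))		/* IPv4/IPv6 multicast */
		return (tun->if_flags & IFF_ALLMULTI) ||
		       (tun->chr_filter[bit_nr >> 5] & (1 << (bit_nr & 31)));
	return 0;						/* other unicast: drop */
}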
......@@ -417,6 +480,12 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
tun = netdev_priv(dev);
tun->dev = dev;
tun->flags = flags;
/* Be promiscuous by default to maintain previous behaviour. */
tun->if_flags = IFF_PROMISC;
/* Generate random Ethernet address. */
*(u16 *)tun->dev_addr = htons(0x00FF);
get_random_bytes(tun->dev_addr + sizeof(u16), 4);
memset(tun->chr_filter, 0, sizeof tun->chr_filter);
tun_net_init(dev);
......@@ -457,13 +526,16 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct tun_struct *tun = file->private_data;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
if (copy_from_user(&ifr, argp, sizeof ifr))
return -EFAULT;
if (cmd == TUNSETIFF && !tun) {
struct ifreq ifr;
int err;
if (copy_from_user(&ifr, (void __user *)arg, sizeof(ifr)))
return -EFAULT;
ifr.ifr_name[IFNAMSIZ-1] = '\0';
rtnl_lock();
......@@ -473,7 +545,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
if (err)
return err;
if (copy_to_user((void __user *)arg, &ifr, sizeof(ifr)))
if (copy_to_user(argp, &ifr, sizeof(ifr)))
return -EFAULT;
return 0;
}
......@@ -519,6 +591,61 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
break;
#endif
case SIOCGIFFLAGS:
ifr.ifr_flags = tun->if_flags;
if (copy_to_user( argp, &ifr, sizeof ifr))
return -EFAULT;
return 0;
case SIOCSIFFLAGS:
/** Set the character device's interface flags. Currently only
* IFF_PROMISC and IFF_ALLMULTI are used. */
tun->if_flags = ifr.ifr_flags;
DBG(KERN_INFO "%s: interface flags 0x%lx\n",
tun->dev->name, tun->if_flags);
return 0;
case SIOCGIFHWADDR:
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev_addr,
min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
if (copy_to_user( argp, &ifr, sizeof ifr))
return -EFAULT;
return 0;
case SIOCSIFHWADDR:
/** Set the character device's hardware address. This is used when
* filtering packets being sent from the network device to the character
* device. */
memcpy(tun->dev_addr, ifr.ifr_hwaddr.sa_data,
min(sizeof ifr.ifr_hwaddr.sa_data, sizeof tun->dev_addr));
DBG(KERN_DEBUG "%s: set hardware address: %x:%x:%x:%x:%x:%x\n",
tun->dev->name,
tun->dev_addr[0], tun->dev_addr[1], tun->dev_addr[2],
tun->dev_addr[3], tun->dev_addr[4], tun->dev_addr[5]);
return 0;
case SIOCADDMULTI:
/** Add the specified group to the character device's multicast filter
* list. */
add_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data);
DBG(KERN_DEBUG "%s: add multi: %x:%x:%x:%x:%x:%x\n",
tun->dev->name,
(u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1],
(u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3],
(u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]);
return 0;
case SIOCDELMULTI:
/** Remove the specified group from the character device's multicast
* filter list. */
del_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data);
DBG(KERN_DEBUG "%s: del multi: %x:%x:%x:%x:%x:%x\n",
tun->dev->name,
(u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1],
(u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3],
(u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]);
return 0;
default:
return -EINVAL;
};
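Since SIOC* requests have _IOC_TYPE 0x89, the new cases above can be driven from userspace with the usual struct ifreq, issued on the tun/tap file descriptor rather than on a socket. A minimal userspace sketch follows; it is not part of the patch, assumes TUNSETIFF has already succeeded on fd, and omits error handling.
/* Userspace sketch: configure the character-device side of a tap interface. */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/if_ether.h>
static void tap_chr_setup(int fd)
{
	struct ifreq ifr;
	const unsigned char mac[ETH_ALEN]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const unsigned char group[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	/* Leave promiscuous mode (the driver default) and keep ALLMULTI off. */
	memset(&ifr, 0, sizeof ifr);
	ifr.ifr_flags = 0;
	ioctl(fd, SIOCSIFFLAGS, &ifr);
	/* Set the unicast address the character side answers to. */
	memset(&ifr, 0, sizeof ifr);
	memcpy(ifr.ifr_hwaddr.sa_data, mac, ETH_ALEN);
	ioctl(fd, SIOCSIFHWADDR, &ifr);
	/* Join one multicast group on the character side. */
	memset(&ifr, 0, sizeof ifr);
	memcpy(ifr.ifr_hwaddr.sa_data, group, ETH_ALEN);
	ioctl(fd, SIOCADDMULTI, &ifr);
}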
......
......@@ -859,7 +859,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
#ifdef USE_MMIO
memaddr
#else
ioaddr
(long)ioaddr
#endif
);
......
......@@ -100,8 +100,8 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
#define VELOCITY_PARAM(N,D) \
static const int N[MAX_UNITS]=OPTION_DEFAULT;\
MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\
static int N[MAX_UNITS]=OPTION_DEFAULT;\
module_param_array(N, int, NULL, 0); \
MODULE_PARM_DESC(N, D);
#define RX_DESC_MIN 64
......@@ -229,7 +229,7 @@ VELOCITY_PARAM(wol_opts, "Wake On Lan options");
VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
static int rx_copybreak = 200;
MODULE_PARM(rx_copybreak, "i");
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
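With module_param_array() the per-adapter values are still supplied as comma-separated lists at load time; for example (assuming the module is installed as via-velocity, values purely illustrative):
    modprobe via-velocity int_works=20,20 rx_copybreak=100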
static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info);
......
......@@ -45,6 +45,11 @@ struct tun_struct {
struct fasync_struct *fasync;
unsigned long if_flags;
u8 dev_addr[ETH_ALEN];
	u32 chr_filter[2];	/* 64-bin multicast hash for frames delivered to the character device */
	u32 net_filter[2];	/* 64-bin multicast hash built from the net device's group list */
#ifdef TUN_DEBUG
int debug;
#endif
......
......@@ -2397,6 +2397,9 @@
#define PCI_DEVICE_ID_TIGERJET_300 0x0001
#define PCI_DEVICE_ID_TIGERJET_100 0x0002
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000A
#define PCI_VENDOR_ID_ARK 0xedd8
#define PCI_DEVICE_ID_ARK_STING 0xa091
#define PCI_DEVICE_ID_ARK_STINGARK 0xa099
......