Commit 2648345f authored by Malli Chilakala, committed by Jeff Garzik

[PATCH] e1000: Driver version, white space, comments, device id

Driver version, white space, comments, device id & other
Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
diff -up net-drivers-2.6/drivers/net/e1000/e1000_ethtool.c net-drivers-2.6/drivers/net/e1000.new/e1000_ethtool.c
parent 1125ecbc
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -139,9 +139,9 @@ struct e1000_adapter;
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define E1000_RX_BUFFER_WRITE 16	/* Must be power of 2 */
 #define AUTO_ALL_MODES		0
-#define E1000_EEPROM_82544_APM	0x0004
+#define E1000_EEPROM_82544_APM	0x0400
 #define E1000_EEPROM_APME	0x0400
 #ifndef E1000_MASTER_SLAVE
 /* Switch to override PHY master/slave setting */
...
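The E1000_EEPROM_82544_APM correction above is more than cosmetic: the driver uses this constant as a bit mask against an EEPROM control word when deciding whether APM wake-up should be reported, so 0x0004 tested bit 2 while the intended bit is bit 10 (0x0400), the same position E1000_EEPROM_APME already uses. A minimal, self-contained illustration of that kind of mask test (the eeprom_data value is made up, not read from real NVM):

	/* Illustrative only: test an APM-enable bit in an EEPROM control word.
	 * 0x0400 selects bit 10; the old (wrong) value 0x0004 selected bit 2. */
	#include <stdint.h>
	#include <stdio.h>

	#define E1000_EEPROM_82544_APM 0x0400

	int main(void)
	{
		uint16_t eeprom_data = 0x0400;	/* pretend word read from the NVM */

		if (eeprom_data & E1000_EEPROM_82544_APM)
			printf("APM wake-up enabled in EEPROM\n");
		else
			printf("APM wake-up disabled\n");
		return 0;
	}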
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
 	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
 	{ "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
 	{ "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
 	{ "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
 	{ "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
@@ -593,7 +594,7 @@ e1000_set_ringparam(struct net_device *netdev,
 	tx_old = adapter->tx_ring;
 	rx_old = adapter->rx_ring;
-	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 	if(netif_running(adapter->netdev))
@@ -784,8 +785,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 	/* Hook up test interrupt handler just for this test */
 	if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
 		shared_int = FALSE;
 	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 			      netdev->name, netdev)){
 		*data = 1;
 		return -1;
 	}
@@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 		 * test failed.
 		 */
 		adapter->test_icr = 0;
-		E1000_WRITE_REG(&adapter->hw, IMC,
-				(~mask & 0x00007FFF));
-		E1000_WRITE_REG(&adapter->hw, ICS,
-				(~mask & 0x00007FFF));
+		E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
+		E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
 		msec_delay(10);
 		if(adapter->test_icr) {
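A note on the two E1000_WRITE_REG calls collapsed above: writing a bit to IMC disables that interrupt cause, so the ~mask value disables everything except the cause under test, and the following ICS write then forces all of those disabled causes; if test_icr becomes non-zero afterwards, a masked cause still reached the handler and this leg of the self-test fails. A rough user-space sketch of that masking logic, with simulated register helpers rather than the driver's macros:

	/* Sketch: verify that interrupt causes masked off via IMC cannot fire.
	 * The "hardware" is simulated; only the masking logic is illustrated. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t int_enabled;	/* simulated interrupt mask state */

	static void write_ims(uint32_t v) { int_enabled |= v; }	/* enable causes */
	static void write_imc(uint32_t v) { int_enabled &= ~v; }	/* disable causes */
	static uint32_t raise_ics(uint32_t v) { return v & int_enabled; } /* which fire */

	int main(void)
	{
		uint32_t mask = 1u << 3;	/* the single cause under test */

		write_ims(0x00007FFF);		/* start with everything enabled */
		write_imc(~mask & 0x00007FFF);	/* disable all causes except 'mask' */

		if (raise_ics(~mask & 0x00007FFF))
			printf("FAIL: a masked cause raised an interrupt\n");
		else
			printf("PASS: masked causes stayed silent\n");
		return 0;
	}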
@@ -1010,7 +1009,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
 		struct sk_buff *skb;
 		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
 				GFP_KERNEL))) {
 			ret_val = 6;
 			goto err_nomem;
@@ -1387,13 +1386,12 @@ static int
 e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 {
 	*data = 0;
 	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
 		adapter->hw.serdes_link_down = TRUE;
-		/* on some blade server designs link establishment */
-		/* could take as long as 2-3 minutes. */
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
 		do {
 			e1000_check_for_link(&adapter->hw);
 			if (adapter->hw.serdes_link_down == FALSE)
@@ -1401,7 +1399,7 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 			msec_delay(20);
 		} while (i++ < 3750);
 		*data = 1;
 	} else {
 		e1000_check_for_link(&adapter->hw);
 		if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
...
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -144,7 +144,6 @@ e1000_phy_init_script(struct e1000_hw *hw)
     DEBUGFUNC("e1000_phy_init_script");
     if(hw->phy_init_script) {
         msec_delay(20);
...
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
...
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -29,33 +29,9 @@
 #include "e1000.h"
 /* Change Log
- * 5.3.12	6/7/04
- * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com>
- * - if_mii support and associated kcompat for older kernels
- * - More errlogging support from Jon Mason <jonmason@us.ibm.com>
- * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com>
- *
- * 5.7.1	12/16/04
- * - Resurrect 82547EI/GI related fix in e1000_intr to avoid deadlocks. This
- *   fix was removed as it caused system instability. The suspected cause of
- *   this is the called to e1000_irq_disable in e1000_intr. Inlined the
- *   required piece of e1000_irq_disable into e1000_intr - Anton Blanchard
- * 5.7.0	12/10/04
- * - include fix to the condition that determines when to quit NAPI - Robert Olsson
- * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
- * 5.6.5	11/01/04
- * - Enabling NETIF_F_SG without checksum offload is illegal -
-     John Mason <jdmason@us.ibm.com>
- * 5.6.3	10/26/04
- * - Remove redundant initialization - Jamal Hadi
- * - Reset buffer_info->dma in tx resource cleanup logic
- * 5.6.2	10/12/04
- * - Avoid filling tx_ring completely - shemminger@osdl.org
- * - Replace schedule_timeout() with msleep()/msleep_interruptible() -
- *   nacc@us.ibm.com
- * - Sparse cleanup - shemminger@osdl.org
- * - Fix tx resource cleanup logic
- * - LLTX support - ak@suse.de and hadi@cyberus.ca
+ * 6.0.44+	2/15/05
+ *   o applied Anton's patch to resolve tx hang in hardware
+ *   o Applied Andrew Mortons patch - e1000 stops working after resume
  */
 char e1000_driver_name[] = "e1000";
@@ -65,7 +41,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "5.7.6-k2"DRIVERNAPI
+#define DRV_VERSION "6.0.54-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
@@ -96,6 +72,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
+	INTEL_E1000_ETHERNET_DEVICE(0x101A),
 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
@@ -110,6 +87,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
+	INTEL_E1000_ETHERNET_DEVICE(0x108B),
+	INTEL_E1000_ETHERNET_DEVICE(0x108C),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	/* required last entry */
 	{0,}
 };
@@ -350,8 +330,11 @@ e1000_up(struct e1000_adapter *adapter)
 #endif
 	if((err = request_irq(adapter->pdev->irq, &e1000_intr,
 			      SA_SHIRQ | SA_SAMPLE_RANDOM,
-			      netdev->name, netdev)))
+			      netdev->name, netdev))) {
+		DPRINTK(PROBE, ERR,
+			"Unable to allocate interrupt Error: %d\n", err);
 		return err;
+	}
 	mod_timer(&adapter->watchdog_timer, jiffies);
@@ -636,7 +619,7 @@ e1000_probe(struct pci_dev *pdev,
 	/* copy the MAC address out of the EEPROM */
-	if (e1000_read_mac_addr(&adapter->hw))
+	if(e1000_read_mac_addr(&adapter->hw))
 		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
 	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
@@ -963,12 +946,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
 	unsigned long begin = (unsigned long) start;
 	unsigned long end = begin + len;
-	/* first rev 82545 and 82546 need to not allow any memory
-	 * write location to cross a 64k boundary due to errata 23 */
+	/* First rev 82545 and 82546 need to not allow any memory
+	 * write location to cross 64k boundary due to errata 23 */
 	if (adapter->hw.mac_type == e1000_82545 ||
-	    adapter->hw.mac_type == e1000_82546 ) {
-		/* check buffer doesn't cross 64kB */
+	    adapter->hw.mac_type == e1000_82546) {
 		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
 	}
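The boundary test kept as context above is compact enough to misread: begin and (end - 1) are the addresses of the first and last byte of the buffer, so XOR-ing them and shifting right by 16 is non-zero exactly when the two bytes fall in different 64 KB regions, i.e. when the buffer straddles a 64 KB boundary (errata 23). The driver's function returns TRUE only when the buffer stays inside a single region, which is why callers treat a FALSE result as the error case. A standalone illustration in plain user-space C, not driver code:

	/* Demonstrates the 64 KB boundary test used by e1000_check_64k_bound():
	 * any difference in address bits 16 and above between the first and
	 * last byte means the buffer crosses a 64 KB boundary. */
	#include <stdio.h>

	static int crosses_64k(unsigned long begin, unsigned long len)
	{
		unsigned long end = begin + len;

		return ((begin ^ (end - 1)) >> 16) != 0;
	}

	int main(void)
	{
		printf("%d\n", crosses_64k(0x0000F000, 0x1000));  /* 0: last byte is 0xFFFF */
		printf("%d\n", crosses_64k(0x0000F000, 0x1001));  /* 1: last byte is 0x10000 */
		printf("%d\n", crosses_64k(0x00010000, 0x10000)); /* 0: fits one 64 KB region */
		return 0;
	}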
@@ -992,8 +973,8 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
 	size = sizeof(struct e1000_buffer) * txdr->count;
 	txdr->buffer_info = vmalloc(size);
 	if(!txdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
-		"Unable to Allocate Memory for the Transmit descriptor ring\n");
+		"Unable to allocate memory for the transmit descriptor ring\n");
 		return -ENOMEM;
 	}
 	memset(txdr->buffer_info, 0, size);
@@ -1006,38 +987,38 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter)
 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
 	if(!txdr->desc) {
 setup_tx_desc_die:
-		DPRINTK(PROBE, ERR,
-		"Unable to Allocate Memory for the Transmit descriptor ring\n");
 		vfree(txdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
 		return -ENOMEM;
 	}
-	/* fix for errata 23, cant cross 64kB boundary */
+	/* Fix for errata 23, can't cross 64kB boundary */
 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 		void *olddesc = txdr->desc;
 		dma_addr_t olddma = txdr->dma;
-		DPRINTK(TX_ERR,ERR,"txdr align check failed: %u bytes at %p\n",
-			txdr->size, txdr->desc);
-		/* try again, without freeing the previous */
+		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
			"at %p\n", txdr->size, txdr->desc);
+		/* Try again, without freeing the previous */
 		txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
-		/* failed allocation, critial failure */
 		if(!txdr->desc) {
+			/* Failed allocation, critical failure */
 			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
 			goto setup_tx_desc_die;
 		}
 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
 			/* give up */
-			pci_free_consistent(pdev, txdr->size,
-					    txdr->desc, txdr->dma);
+			pci_free_consistent(pdev, txdr->size, txdr->desc,
+					    txdr->dma);
 			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
 			DPRINTK(PROBE, ERR,
-				"Unable to Allocate aligned Memory for the Transmit"
-				" descriptor ring\n");
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
 			vfree(txdr->buffer_info);
 			return -ENOMEM;
 		} else {
-			/* free old, move on with the new one since its okay */
+			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, txdr->size, olddesc, olddma);
 		}
 	}
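The allocation dance in this hunk is easy to misread: when the first DMA allocation straddles 64 KB, the driver allocates a second buffer before freeing the first (so the allocator cannot hand back the same region), then keeps whichever copy passes the boundary check. A condensed sketch of that retry pattern, with plain malloc/free standing in for pci_alloc_consistent/pci_free_consistent and buffer_ok() standing in for e1000_check_64k_bound():

	/* Sketch of the errata-23 "allocate again while still holding the old
	 * buffer" pattern; not the driver's code, just the control flow. */
	#include <stdlib.h>

	static int buffer_ok(void *p, size_t len) { (void)p; (void)len; return 1; }

	static void *alloc_checked(size_t size)
	{
		void *desc = malloc(size);

		if (!desc)
			return NULL;
		if (!buffer_ok(desc, size)) {
			void *olddesc = desc;

			/* Retry WITHOUT freeing the first allocation, so the
			 * allocator cannot return the same region again. */
			desc = malloc(size);
			if (!desc) {
				free(olddesc);
				return NULL;
			}
			if (!buffer_ok(desc, size)) {
				free(desc);		/* still bad: give up */
				free(olddesc);
				return NULL;
			}
			free(olddesc);			/* new one is fine, drop the old */
		}
		return desc;
	}

	int main(void) { free(alloc_checked(4096)); return 0; }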
@@ -1144,8 +1125,8 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
 	if(!rxdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
-		"Unable to Allocate Memory for the Recieve descriptor ring\n");
+		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->buffer_info, 0, size);
@@ -1185,43 +1166,42 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 	if(!rxdr->desc) {
 setup_rx_desc_die:
-		DPRINTK(PROBE, ERR,
-		"Unble to Allocate Memory for the Recieve descriptor ring\n");
 		vfree(rxdr->buffer_info);
 		kfree(rxdr->ps_page);
 		kfree(rxdr->ps_page_dma);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
-	/* fix for errata 23, cant cross 64kB boundary */
+	/* Fix for errata 23, can't cross 64kB boundary */
 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 		void *olddesc = rxdr->desc;
 		dma_addr_t olddma = rxdr->dma;
-		DPRINTK(RX_ERR,ERR,
-		"rxdr align check failed: %u bytes at %p\n",
-		rxdr->size, rxdr->desc);
-		/* try again, without freeing the previous */
+		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+			"at %p\n", rxdr->size, rxdr->desc);
+		/* Try again, without freeing the previous */
 		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
-		/* failed allocation, critial failure */
 		if(!rxdr->desc) {
+			/* Failed allocation, critical failure */
 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
 			goto setup_rx_desc_die;
 		}
 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
 			/* give up */
-			pci_free_consistent(pdev, rxdr->size,
-					    rxdr->desc, rxdr->dma);
+			pci_free_consistent(pdev, rxdr->size, rxdr->desc,
+					    rxdr->dma);
 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
 			DPRINTK(PROBE, ERR,
-				"Unable to Allocate aligned Memory for the"
-				" Receive descriptor ring\n");
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
 			vfree(rxdr->buffer_info);
 			kfree(rxdr->ps_page);
 			kfree(rxdr->ps_page_dma);
 			return -ENOMEM;
 		} else {
-			/* free old, move on with the new one since its okay */
+			/* Free old allocation, new allocation was successful */
			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
 		}
 	}
@@ -1234,7 +1214,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 }
 /**
- * e1000_setup_rctl - configure the receive control register
+ * e1000_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
@@ -1426,13 +1406,11 @@ static inline void
 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 			struct e1000_buffer *buffer_info)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	if(buffer_info->dma) {
-		pci_unmap_page(pdev,
+		pci_unmap_page(adapter->pdev,
 			       buffer_info->dma,
 			       buffer_info->length,
 			       PCI_DMA_TODEVICE);
 		buffer_info->dma = 0;
 	}
 	if(buffer_info->skb) {
@@ -1457,7 +1435,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
 	/* Free all the Tx ring sk_buffs */
 	if (likely(adapter->previous_buffer_info.skb != NULL)) {
 		e1000_unmap_and_free_tx_resource(adapter,
 				&adapter->previous_buffer_info);
 	}
@@ -1659,15 +1637,15 @@ e1000_set_multi(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev->priv;
 	struct e1000_hw *hw = &adapter->hw;
 	struct dev_mc_list *mc_ptr;
+	unsigned long flags;
 	uint32_t rctl;
 	uint32_t hash_value;
 	int i;
-	unsigned long flags;
+	/* Check for Promiscuous and All Multicast modes */
 	spin_lock_irqsave(&adapter->tx_lock, flags);
-	/* Check for Promiscuous and All Multicast modes */
 	rctl = E1000_READ_REG(hw, RCTL);
 	if(netdev->flags & IFF_PROMISC) {
@@ -1874,7 +1852,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
-	/* Force detection of hung controller every watchdog period*/
+	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = TRUE;
 	/* Reset the timer */
@@ -2255,7 +2233,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->tso_size;
 	/* The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
 	 * initiating the DMA for each buffer.  The calc is:
 	 * 4 = ceil(buffer len/mss).  To make sure we don't
@@ -2268,7 +2246,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if((mss) || (skb->ip_summed == CHECKSUM_HW))
 		count++;
-	count++;	/* for sentinel desc */
+	count++;
 #else
 	if(skb->ip_summed == CHECKSUM_HW)
 		count++;
@@ -2658,7 +2636,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	 */
 	if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
 		atomic_inc(&adapter->irq_sem);
-		E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+		E1000_WRITE_REG(hw, IMC, ~0);
 	}
 	for(i = 0; i < E1000_MAX_INTR; i++)
@@ -2686,7 +2664,7 @@ e1000_clean(struct net_device *netdev, int *budget)
 	int work_to_do = min(*budget, netdev->quota);
 	int tx_cleaned;
 	int work_done = 0;
 	tx_cleaned = e1000_clean_tx_irq(adapter);
 	adapter->clean_rx(adapter, &work_done, work_to_do);
@@ -2776,9 +2754,9 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
 			netif_wake_queue(netdev);
 	spin_unlock(&adapter->tx_lock);
 	if(adapter->detect_tx_hung) {
-		/* detect a transmit hang in hardware, this serializes the
+		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = FALSE;
 		if (tx_ring->buffer_info[i].dma &&
@@ -2923,7 +2901,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
 		if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
 			/* All receives must fit into a single buffer */
 			E1000_DBG("%s: Receive packet consumed multiple"
				  " buffers\n", netdev->name);
 			dev_kfree_skb_irq(skb);
 			goto next_desc;
 		}
@@ -3130,43 +3108,43 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 	struct e1000_rx_desc *rx_desc;
 	struct e1000_buffer *buffer_info;
 	struct sk_buff *skb;
-	unsigned int i, bufsz;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 	while(!buffer_info->skb) {
-		bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
 		skb = dev_alloc_skb(bufsz);
 		if(unlikely(!skb)) {
 			/* Better luck next round */
 			break;
 		}
-		/* fix for errata 23, cant cross 64kB boundary */
+		/* Fix for errata 23, can't cross 64kB boundary */
 		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 			struct sk_buff *oldskb = skb;
-			DPRINTK(RX_ERR,ERR,
-			"skb align check failed: %u bytes at %p\n",
-			bufsz, skb->data);
-			/* try again, without freeing the previous */
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
				"at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
 			skb = dev_alloc_skb(bufsz);
+			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
 				break;
 			}
 			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
 				/* give up */
 				dev_kfree_skb(skb);
 				dev_kfree_skb(oldskb);
 				break; /* while !buffer_info->skb */
 			} else {
-				/* move on with the new one */
+				/* Use new allocation */
				dev_kfree_skb(oldskb);
 			}
 		}
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
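The alignment comment above is the usual NET_IP_ALIGN trick: Ethernet headers are 14 bytes, so reserving 2 bytes at the head of the receive buffer places the IP header on a 16-byte boundary once the MAC header is accounted for. A worked illustration of the arithmetic only (user-space, no sk_buff involved):

	/* Worked example of the NET_IP_ALIGN offset: with 2 bytes reserved before
	 * a 14-byte Ethernet header, the IP header starts 16 bytes into the buffer. */
	#include <stdio.h>

	#define NET_IP_ALIGN 2	/* typical value; some architectures define it as 0 */
	#define ETH_HLEN     14

	int main(void)
	{
		unsigned int frame_start = NET_IP_ALIGN;           /* where the MAC header begins */
		unsigned int ip_start    = frame_start + ETH_HLEN; /* where the IP header begins */

		printf("IP header offset: %u (16-byte aligned: %s)\n",
		       ip_start, (ip_start % 16 == 0) ? "yes" : "no");
		return 0;
	}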
@@ -3182,25 +3160,23 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 						  adapter->rx_buffer_len,
 						  PCI_DMA_FROMDEVICE);
-		/* fix for errata 23, cant cross 64kB boundary */
-		if(!e1000_check_64k_bound(adapter,
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
-			DPRINTK(RX_ERR,ERR,
-				"dma align check failed: %u bytes at %ld\n",
-				adapter->rx_buffer_len, (unsigned long)buffer_info->dma);
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
 			dev_kfree_skb(skb);
 			buffer_info->skb = NULL;
-			pci_unmap_single(pdev,
-					 buffer_info->dma,
+			pci_unmap_single(pdev, buffer_info->dma,
 					 adapter->rx_buffer_len,
 					 PCI_DMA_FROMDEVICE);
 			break; /* while !buffer_info->skb */
 		}
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -3210,7 +3186,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
 		 * applicable for weak-ordered memory model archs,
 		 * such as IA-64). */
 		wmb();
 		E1000_WRITE_REG(&adapter->hw, RDT, i);
 	}
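The wmb() immediately before the RDT write in the last hunk is the standard descriptor-ring ordering rule: the CPU stores that fill in the receive descriptor must be globally visible before the tail-pointer write tells the NIC those descriptors are ready, which matters on weakly ordered machines such as IA-64. A schematic of the producer side only, with placeholder types rather than the driver's structures:

	/* Schematic producer step for a descriptor ring: fill in the descriptor,
	 * issue a write barrier, then advance the tail the device polls.
	 * desc_ring/tail_reg are illustrative placeholders, not e1000 structures. */
	#include <stdint.h>

	#define wmb() __sync_synchronize()	/* stand-in for the kernel's wmb() */

	struct rx_desc { uint64_t buffer_addr; uint16_t length; uint16_t status; };

	static struct rx_desc desc_ring[256];
	static volatile uint32_t tail_reg;	/* would be an MMIO register on real hardware */

	static void publish_rx_buffer(unsigned int i, uint64_t dma_addr)
	{
		desc_ring[i].buffer_addr = dma_addr;
		desc_ring[i].status = 0;

		wmb();			/* descriptor writes must be visible first... */
		tail_reg = i;		/* ...before the device is told it may use them */
	}

	int main(void) { publish_rx_buffer(0, 0x1000); return 0; }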
@@ -3483,9 +3458,10 @@ void
 e1000_pci_set_mwi(struct e1000_hw *hw)
 {
 	struct e1000_adapter *adapter = hw->back;
-	int ret;
-	ret = pci_set_mwi(adapter->pdev);
+	int ret_val = pci_set_mwi(adapter->pdev);
+	if(ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
 }
 void
@@ -3643,8 +3619,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 		break;
 	case SPEED_1000 + DUPLEX_HALF: /* not supported */
 	default:
-		DPRINTK(PROBE, ERR,
-			"Unsupported Speed/Duplexity configuration\n");
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -3810,7 +3785,7 @@ e1000_resume(struct pci_dev *pdev)
  * the interrupt routine is executing.
  */
 static void
-e1000_netpoll (struct net_device *netdev)
+e1000_netpoll(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev->priv;
 	disable_irq(adapter->pdev->irq);
...
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
...
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -478,7 +478,6 @@ e1000_check_options(struct e1000_adapter *adapter)
 			DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
 				opt.name);
 			break;
-		case -1:
 		default:
 			e1000_validate_option(&adapter->itr, &opt,
 				adapter);
...