Commit ed492288 authored Mar 12, 2002 by Jeff Garzik

Merge mandrakesoft.com:/home/jgarzik/vanilla/linus-2.5
into mandrakesoft.com:/home/jgarzik/repo/net-drivers-2.5

Parents: d9f2d50e 77d28a4f
Showing 10 changed files with 1185 additions and 244 deletions (+1185 -244)
drivers/net/8139cp.c            +17   -7
drivers/net/bmac.c               +1   -0
drivers/net/e100/e100.h         +12   -5
drivers/net/e100/e100_eeprom.c  +16  -15
drivers/net/e100/e100_main.c   +289  -49
drivers/net/e100/e100_phy.c      +1   -1
drivers/net/eepro100.c          +92  -83
drivers/net/tg3.c              +557  -79
drivers/net/tg3.h               +47   -4
include/linux/ethtool.h        +153   -1
drivers/net/8139cp.c

@@ -65,9 +65,15 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define CP_VLAN_TAG_USED 1
+#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
+	do { (tx_desc)->opts2 = (vlan_tag_value); } while (0)
+#else
 #define CP_VLAN_TAG_USED 0
 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
 	do { (tx_desc)->opts2 = 0; } while (0)
+#endif
 
 /* These identify the driver base version and may not be removed. */
 static char version[] __devinitdata =
@@ -643,7 +649,7 @@ static void cp_tx (struct cp_private *cp)
 	cp->tx_tail = tx_tail;
 
-	if (netif_queue_stopped(cp->dev) && (TX_BUFFS_AVAIL(cp) > 1))
+	if (netif_queue_stopped(cp->dev) && (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)))
 		netif_wake_queue(cp->dev);
 }
@@ -658,9 +664,12 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irq(&cp->lock);
 
+	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
 		spin_unlock_irq(&cp->lock);
+		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
+		       dev->name);
 		return 1;
 	}
@@ -760,9 +769,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	if (netif_msg_tx_queued(cp))
 		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
 		       dev->name, entry, skb->len);
-	if (TX_BUFFS_AVAIL(cp) < 0)
-		BUG();
-	if (TX_BUFFS_AVAIL(cp) == 0)
+	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
 	spin_unlock_irq(&cp->lock);
@@ -773,6 +780,9 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
+/* Set or clear the multicast filter for this adaptor.
+   This routine is not state sensitive and need not be SMP locked. */
+
 static void __cp_set_rx_mode (struct net_device *dev)
 {
 	struct cp_private *cp = dev->priv;
@@ -1072,6 +1082,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
 	/* if network interface not up, no need for complexity */
 	if (!netif_running(dev)) {
+		dev->mtu = new_mtu;
 		cp_set_rxbufsize(cp);	/* set new rx buf size */
 		return 0;
 	}
@@ -1081,6 +1092,7 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
 	cp_stop_hw(cp);			/* stop h/w and free rings */
 	cp_clean_rings(cp);
 
+	dev->mtu = new_mtu;
 	cp_set_rxbufsize(cp);		/* set new rx buf size */
 
 	rc = cp_init_rings(cp);		/* realloc and restart h/w */
@@ -1226,7 +1238,7 @@ static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 #if CP_VLAN_TAG_USED
-static int cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 {
 	struct cp_private *cp = dev->priv;
@@ -1234,8 +1246,6 @@ static int cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 	cp->vlgrp = grp;
 	cpw16(CpCmd, cpr16(CpCmd) | RxVlanOn);
 	spin_unlock_irq(&cp->lock);
-
-	return 0;
 }
 
 static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
drivers/net/bmac.c

@@ -17,6 +17,7 @@
 #include <linux/timer.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
+#include <linux/crc32.h>
 #include <asm/prom.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
drivers/net/e100/e100.h

@@ -128,12 +128,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define E100_DEFAULT_TCB   MAX_TCB
 #define E100_MIN_TCB       2*TX_FRAME_CNT + 3	/* make room for at least 2 interrupts */
-#ifdef __ia64__
-/* We can't use too many DMAble buffers on IA64 machines with >4 GB mem */
-#define E100_MAX_TCB       64
-#else
 #define E100_MAX_TCB       1024
-#endif /* __ia64__ */
 
 #define E100_DEFAULT_RFD   MAX_RFD
 #define E100_MIN_RFD       8
@@ -766,6 +761,8 @@ typedef enum _non_tx_cmd_state_t {
 #define IPCB_INSERTVLAN_ENABLE     BIT_1
 #define IPCB_IP_ACTIVATION_DEFAULT IPCB_HARDWAREPARSING_ENABLE
 
+#define FOLD_CSUM(_XSUM) ((((_XSUM << 16) | (_XSUM >> 16)) + _XSUM) >> 16)
+
 /* Transmit Buffer Descriptor (TBD)*/
 typedef struct _tbd_t {
 	u32 tbd_buf_addr;	/* Physical Transmit Buffer Address */
@@ -1008,6 +1005,11 @@ struct e100_private {
 	u32 wolopts;
 	u16 ip_lbytes;
 #endif
+
+#ifdef CONFIG_PM
+	u32 pci_state[16];
+#endif
 };
 
 #define E100_AUTONEG        0
@@ -1030,4 +1032,9 @@ extern unsigned char e100_selftest(struct e100_private *bdp, u32 *st_timeout,
 extern unsigned char e100_get_link_state(struct e100_private *bdp);
 extern unsigned char e100_wait_scb(struct e100_private *bdp);
 
+extern void e100_deisolate_driver(struct e100_private *bdp, u8 recover,
+				  u8 full_reset);
+extern unsigned char e100_hw_reset_recover(struct e100_private *bdp,
+					   u32 reset_cmd);
+
 #endif
drivers/net/e100/e100_eeprom.c

@@ -69,6 +69,10 @@ ANY LOSS OF USE; DATA, OR PROFITS; OR BUSINESS INTERUPTION) HOWEVER CAUSED
 AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR
 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+*******************************************************************************
+
+Portions (C) 2002 Red Hat, Inc. under the terms of the GNU GPL v2.
+
 *******************************************************************************/
 
 /**********************************************************************
@@ -152,7 +156,7 @@ eeprom_set_semaphore(struct e100_private *adapter)
 		}
 
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout(1 + (HZ - 1) / 100);
 	}
 	return false;
 }
@@ -252,19 +256,12 @@ e100_eeprom_size(struct e100_private *adapter)
 //  Returns: bits in an address for that size eeprom
 //----------------------------------------------------------------------------------------
-static u16
+static inline int
 eeprom_address_size(u16 size)
 {
-	switch (size) {
-	case 64:
-		return 6;
-	case 128:
-		return 7;
-	case 256:
-		return 8;
-	}
-	return 0;	//fix compiler warning or error!
+	int isize = size;
+
+	return ffs(isize);
 }
 
 //----------------------------------------------------------------------------------------
@@ -348,6 +345,7 @@ shift_out_bits(struct e100_private *adapter, u16 data, u16 count)
 			x |= EEDI;
 
 		writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+		readw(&(adapter->scb->scb_status)); /* flush command to card */
 		udelay(EEPROM_STALL_TIME);
 		raise_clock(adapter, &x);
 		lower_clock(adapter, &x);
@@ -374,6 +372,7 @@ raise_clock(struct e100_private *adapter, u16 *x)
 {
 	*x = *x | EESK;
 	writew(*x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+	readw(&(adapter->scb->scb_status)); /* flush command to card */
 	udelay(EEPROM_STALL_TIME);
 }
@@ -393,6 +392,7 @@ lower_clock(struct e100_private *adapter, u16 *x)
 {
 	*x = *x & ~EESK;
 	writew(*x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+	readw(&(adapter->scb->scb_status)); /* flush command to card */
 	udelay(EEPROM_STALL_TIME);
 }
@@ -498,7 +498,7 @@ e100_eeprom_write_word(struct e100_private *adapter, u16 reg, u16 data)
 	x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
 	x &= ~(EEDI | EEDO | EESK);
 	writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
-	wmb();
+	readw(&(adapter->scb->scb_status)); /* flush command to card */
 	udelay(EEPROM_STALL_TIME);
 
 	x |= EECS;
 	writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
@@ -587,7 +587,7 @@ eeprom_wait_cmd_done(struct e100_private *adapter)
 			return true;
 
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout(1 + (HZ - 1) / 100);
 	}
 
 	return false;
@@ -606,9 +606,10 @@ eeprom_stand_by(struct e100_private *adapter)
 	x = readw(&CSR_EEPROM_CONTROL_FIELD(adapter));
 	x &= ~(EECS | EESK);
 	writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
-	wmb();
+	readw(&(adapter->scb->scb_status)); /* flush command to card */
 	udelay(EEPROM_STALL_TIME);
 
 	x |= EECS;
 	writew(x, &CSR_EEPROM_CONTROL_FIELD(adapter));
+	readw(&(adapter->scb->scb_status)); /* flush command to card */
 	udelay(EEPROM_STALL_TIME);
 }
drivers/net/e100/e100_main.c

@@ -69,6 +69,10 @@ ANY LOSS OF USE; DATA, OR PROFITS; OR BUSINESS INTERUPTION) HOWEVER CAUSED
 AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY OR
 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+*******************************************************************************
+
+Portions (C) 2002 Red Hat, Inc. under the terms of the GNU GPL v2.
+
 *******************************************************************************/
 
 /**********************************************************************
@@ -122,7 +126,6 @@ static int e100_do_ethtool_ioctl(struct net_device *, struct ifreq *);
 static void e100_get_speed_duplex_caps(struct e100_private *);
 static int e100_ethtool_get_settings(struct net_device *, struct ifreq *);
 static int e100_ethtool_set_settings(struct net_device *, struct ifreq *);
-static void e100_set_speed_duplex(struct e100_private *);
 
 #ifdef ETHTOOL_GDRVINFO
 static int e100_ethtool_get_drvinfo(struct net_device *, struct ifreq *);
@@ -163,7 +166,8 @@ static void e100_non_tx_background(unsigned long);
 /* Global Data structures and variables */
 char e100_copyright[] __devinitdata = "Copyright (c) 2002 Intel Corporation";
-#define E100_VERSION "2.0.20-pre1"
+#define E100_VERSION "2.0.22-pre1"
+
 #define E100_FULL_DRIVER_NAME "Intel(R) PRO/100 Fast Ethernet Adapter - Loadable driver, ver "
 
 const char *e100_version = E100_VERSION;
@@ -171,6 +175,13 @@ const char *e100_full_driver_name = E100_FULL_DRIVER_NAME E100_VERSION;
 char *e100_short_driver_name = "e100";
 static int e100nics = 0;
 
+#ifdef CONFIG_PM
+static int e100_save_state(struct pci_dev *pcid, u32 state);
+static int e100_suspend(struct pci_dev *pcid, u32 state);
+static int e100_enable_wake(struct pci_dev *pcid, u32 state, int enable);
+static int e100_resume(struct pci_dev *pcid);
+#endif
+
 /*********************************************************************/
 /*! This is a GCC extension to ANSI C.
 *  See the item "Labeled Elements in Initializers" in the section
@@ -203,6 +214,7 @@ struct net_device_stats *e100_get_stats(struct net_device *);
 static void e100intr(int, void *, struct pt_regs *);
 static void e100_print_brd_conf(struct e100_private *);
 static void e100_set_multi(struct net_device *);
+void e100_set_speed_duplex(struct e100_private *);
 char *e100_get_brand_msg(struct e100_private *);
 static u8 e100_pci_setup(struct pci_dev *, struct e100_private *);
@@ -517,6 +529,7 @@ e100_dis_intr(struct e100_private *bdp)
 {
 	/* Disable interrupts on our PCI board by setting the mask bit */
 	writeb(SCB_INT_MASK, &bdp->scb->scb_cmd_hi);
+	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
 }
 
 /**
@@ -537,6 +550,7 @@ e100_trigger_SWI(struct e100_private *bdp)
 {
 	/* Trigger interrupt on our PCI board by asserting SWI bit */
 	writeb(SCB_SOFT_INT, &bdp->scb->scb_cmd_hi);
+	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
 }
 
 static int __devinit
@@ -728,8 +742,15 @@ e100_remove1(struct pci_dev *pcid)
 	}
 
 #ifdef ETHTOOL_GWOL
-	/* Set up wol options and enable PME */
-	e100_do_wol(pcid, bdp);
+	/* Set up wol options and enable PME if wol is enabled */
+	if (bdp->wolopts) {
+		e100_do_wol(pcid, bdp);
+		/* Enable PME for power state D3 */
+		pci_enable_wake(pcid, 3, 1);
+		/* Set power state to D1 in case driver is RELOADED */
+		/* If system powers down, device is switched from D1 to D3 */
+		pci_set_power_state(pcid, 1);
+	}
 #endif
 
 	e100_clear_structs(dev);
@@ -744,8 +765,15 @@ static struct pci_driver e100_driver = {
 	id_table:     e100_id_table,
 	probe:        e100_found1,
 	remove:       __devexit_p(e100_remove1),
+#ifdef CONFIG_PM
+	suspend:      e100_suspend,
+	resume:       e100_resume,
+	save_state:   e100_save_state,
+	enable_wake:  e100_enable_wake,
+#else
 	suspend:      NULL,
 	resume:       NULL,
+#endif
 };
 
 static int __init
@@ -1194,7 +1222,7 @@ e100_set_multi(struct net_device *dev)
 	/* reconfigure the chip if something has changed in its config space */
 	e100_config(bdp);
 
-	if ((promisc_enbl) || (mulcast_enbl)) {
+	if (promisc_enbl || mulcast_enbl) {
 		goto exit;	/* no need for Multicast Cmd */
 	}
@@ -1996,8 +2024,9 @@ e100_rx_srv(struct e100_private *bdp, u32 max_number_of_rfds,
 		if (max_number_of_rfds && (rfd_cnt >= max_number_of_rfds)) {
 			break;
 		}
-		if (list_empty(&(bdp->active_rx_list)))
+		if (list_empty(&(bdp->active_rx_list))) {
 			break;
+		}
 
 		rx_struct = list_entry(bdp->active_rx_list.next,
 				       struct rx_list_elem, list_elem);
@@ -2030,10 +2059,8 @@ e100_rx_srv(struct e100_private *bdp, u32 max_number_of_rfds,
 				    (data_sz + bdp->rfd_size),
 				    PCI_DMA_FROMDEVICE);
 
-		// we unmap using DMA_TODEVICE to avoid another memcpy from the
-		// bounce buffer
 		pci_unmap_single(bdp->pdev, rx_struct->dma_addr,
-				 sizeof (rfd_t), PCI_DMA_TODEVICE);
+				 sizeof (rfd_t), PCI_DMA_FROMDEVICE);
 
 		list_add(&(rx_struct->list_elem), &(bdp->rx_struct_pool));
@@ -2146,6 +2173,34 @@ e100_refresh_txthld(struct e100_private *bdp)
 	}			/* end underrun check */
 }
 
+#ifdef E100_ZEROCOPY
+/**
+ * e100_pseudo_hdr_csum - compute IP pseudo-header checksum
+ * @ip: points to the header of the IP packet
+ *
+ * Return the 16 bit checksum of the IP pseudo-header.,which is computed
+ * on the fields: IP src, IP dst, next protocol, payload length.
+ * The checksum vaule is returned in network byte order.
+ */
+static inline u16
+e100_pseudo_hdr_csum(const struct iphdr *ip)
+{
+	u32 pseudo = 0;
+	u32 payload_len = 0;
+
+	payload_len = ntohs(ip->tot_len) - (ip->ihl * 4);
+
+	pseudo += htons(payload_len);
+	pseudo += (ip->protocol << 8);
+	pseudo += ip->saddr & 0x0000ffff;
+	pseudo += (ip->saddr & 0xffff0000) >> 16;
+	pseudo += ip->daddr & 0x0000ffff;
+	pseudo += (ip->daddr & 0xffff0000) >> 16;
+
+	return FOLD_CSUM(pseudo);
+}
+#endif /* E100_ZEROCOPY */
+
 /**
 * e100_prepare_xmit_buff - prepare a buffer for transmission
 * @bdp: atapter's private data struct
@@ -2204,10 +2259,9 @@ e100_prepare_xmit_buff(struct e100_private *bdp, struct sk_buff *skb)
 				chksum = &(udp->check);
 			}
 
-			*chksum = csum_tcpudp_magic(ip->daddr, ip->saddr,
-						    sizeof(struct tcphdr), ip->protocol, 0);
+			*chksum = e100_pseudo_hdr_csum(ip);
 		}
 	} else {
 		if (bdp->flags & USE_IPCB) {
 			tcb->tcbu.ipcb.ip_activation_high =
@@ -2319,9 +2373,8 @@ e100_start_cu(struct e100_private *bdp, tcb_t *tcb)
 		if (!e100_wait_cus_idle(bdp))
 			printk("%s cu_start: timeout waiting for cu\n",
 			       bdp->device->name);
 		if (!e100_wait_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
 					  SCB_CUC_START)) {
 			printk("%s cu_start: timeout waiting for scb\n",
 			       bdp->device->name);
 			e100_exec_cmplx(bdp, (u32) (tcb->tcb_phys),
@@ -2378,6 +2431,7 @@ e100_selftest(struct e100_private *bdp, u32 *st_timeout, u32 *st_result)
 	/* Do the port command */
 	writel(selftest_cmd, &bdp->scb->scb_port);
+	readw(&(bdp->scb->scb_status));	/* flushes last write, read-safe */
 
 	/* Wait at least 10 milliseconds for the self-test to complete */
 	set_current_state(TASK_UNINTERRUPTIBLE);
@@ -2386,8 +2440,6 @@ e100_selftest(struct e100_private *bdp, u32 *st_timeout, u32 *st_result)
 	/* disable interrupts since the're now enabled */
 	e100_dis_intr(bdp);
 
-	rmb();
-
 	/* if The First Self Test DWORD Still Zero, We've timed out. If the
 	 * second DWORD is not zero then we have an error. */
 	if ((bdp->selftest->st_sign == 0) || (bdp->selftest->st_result != 0)) {
...
@@ -2681,7 +2733,7 @@ e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
wmb
();
wmb
();
if
(
in_interrupt
())
if
(
in_interrupt
()
||
netif_running
(
bdp
->
device
)
)
return
e100_delayed_exec_non_cu_cmd
(
bdp
,
command
);
return
e100_delayed_exec_non_cu_cmd
(
bdp
,
command
);
spin_lock_bh
(
&
(
bdp
->
bd_non_tx_lock
));
spin_lock_bh
(
&
(
bdp
->
bd_non_tx_lock
));
...
@@ -2711,7 +2763,7 @@ e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
...
@@ -2711,7 +2763,7 @@ e100_exec_non_cu_cmd(struct e100_private *bdp, nxmit_cb_entry_t *command)
bdp
->
next_cu_cmd
=
START_WAIT
;
bdp
->
next_cu_cmd
=
START_WAIT
;
spin_unlock_irqrestore
(
&
(
bdp
->
bd_lock
),
lock_flag
);
spin_unlock_irqrestore
(
&
(
bdp
->
bd_lock
),
lock_flag
);
/* now wait for completion of non-cu CB up to 20 msec*/
/* now wait for completion of non-cu CB up to 20 msec
*/
expiration_time
=
jiffies
+
HZ
/
50
+
1
;
expiration_time
=
jiffies
+
HZ
/
50
+
1
;
while
(
time_before
(
jiffies
,
expiration_time
))
{
while
(
time_before
(
jiffies
,
expiration_time
))
{
rmb
();
rmb
();
...
@@ -2754,6 +2806,7 @@ e100_sw_reset(struct e100_private *bdp, u32 reset_cmd)
...
@@ -2754,6 +2806,7 @@ e100_sw_reset(struct e100_private *bdp, u32 reset_cmd)
{
{
/* Do a selective reset first to avoid a potential PCI hang */
/* Do a selective reset first to avoid a potential PCI hang */
writel
(
PORT_SELECTIVE_RESET
,
&
bdp
->
scb
->
scb_port
);
writel
(
PORT_SELECTIVE_RESET
,
&
bdp
->
scb
->
scb_port
);
readw
(
&
(
bdp
->
scb
->
scb_status
));
/* flushes last write, read-safe */
/* wait for the reset to take effect */
/* wait for the reset to take effect */
udelay
(
20
);
udelay
(
20
);
...
@@ -3086,13 +3139,140 @@ e100_isolate_driver(struct e100_private *bdp)
...
@@ -3086,13 +3139,140 @@ e100_isolate_driver(struct e100_private *bdp)
del_timer_sync
(
&
bdp
->
watchdog_timer
);
del_timer_sync
(
&
bdp
->
watchdog_timer
);
netif_stop_queue
(
bdp
->
device
);
if
(
netif_running
(
bdp
->
device
))
netif_stop_queue
(
bdp
->
device
);
bdp
->
last_tcb
=
NULL
;
bdp
->
last_tcb
=
NULL
;
e100_sw_reset
(
bdp
,
PORT_SELECTIVE_RESET
);
e100_sw_reset
(
bdp
,
PORT_SELECTIVE_RESET
);
}
}
void
e100_set_speed_duplex
(
struct
e100_private
*
bdp
)
{
e100_phy_set_speed_duplex
(
bdp
,
true
);
e100_config_fc
(
bdp
);
/* re-config flow-control if necessary */
e100_config
(
bdp
);
}
static
void
e100_tcb_add_C_bit
(
struct
e100_private
*
bdp
)
{
tcb_t
*
tcb
=
(
tcb_t
*
)
bdp
->
tcb_pool
.
data
;
int
i
;
for
(
i
=
0
;
i
<
bdp
->
params
.
TxDescriptors
;
i
++
,
tcb
++
)
{
tcb
->
tcb_hdr
.
cb_status
|=
cpu_to_le16
(
CB_STATUS_COMPLETE
);
}
}
/*
* Procedure: e100_hw_reset_recover
*
* Description: This routine will recover the hw after reset.
*
* Arguments:
* bdp - Ptr to this card's e100_bdconfig structure
* reset_cmd - s/w reset or selective reset.
*
* Returns:
* true upon success
* false upon failure
*/
unsigned
char
e100_hw_reset_recover
(
struct
e100_private
*
bdp
,
u32
reset_cmd
)
{
bdp
->
last_tcb
=
NULL
;
if
(
reset_cmd
==
PORT_SOFTWARE_RESET
)
{
/*load CU & RU base */
if
(
!
e100_wait_exec_cmplx
(
bdp
,
0
,
SCB_CUC_LOAD_BASE
))
{
return
false
;
}
if
(
e100_load_microcode
(
bdp
))
{
bdp
->
flags
|=
DF_UCODE_LOADED
;
}
if
(
!
e100_wait_exec_cmplx
(
bdp
,
0
,
SCB_RUC_LOAD_BASE
))
{
return
false
;
}
/* Issue the load dump counters address command */
if
(
!
e100_wait_exec_cmplx
(
bdp
,
bdp
->
stat_cnt_phys
,
SCB_CUC_DUMP_ADDR
))
{
return
false
;
}
if
(
!
e100_setup_iaaddr
(
bdp
,
bdp
->
device
->
dev_addr
))
{
printk
(
KERN_ERR
"e100_hw_reset_recover: setup iaaddr failed
\n
"
);
return
false
;
}
e100_set_multi_exec
(
bdp
->
device
);
/* Change for 82558 enhancement */
/* If 82558/9 and if the user has enabled flow control, set up * the
* Flow Control Reg. in the CSR */
if
((
bdp
->
flags
&
IS_BACHELOR
)
&&
(
bdp
->
params
.
b_params
&
PRM_FC
))
{
writeb
(
DFLT_FC_THLD
,
&
bdp
->
scb
->
scb_ext
.
d101_scb
.
scb_fc_thld
);
writeb
(
DFLT_FC_CMD
,
&
bdp
->
scb
->
scb_ext
.
d101_scb
.
scb_fc_xon_xoff
);
}
}
e100_force_config
(
bdp
);
return
true
;
}
void
e100_deisolate_driver
(
struct
e100_private
*
bdp
,
u8
recover
,
u8
full_init
)
{
if
(
full_init
)
{
e100_sw_reset
(
bdp
,
PORT_SOFTWARE_RESET
);
if
(
!
e100_hw_reset_recover
(
bdp
,
PORT_SOFTWARE_RESET
))
printk
(
KERN_ERR
"e100_deisolate_driver:"
" HW SOFTWARE reset recover failed
\n
"
);
}
if
(
recover
)
{
bdp
->
next_cu_cmd
=
START_WAIT
;
bdp
->
last_tcb
=
NULL
;
/* lets reset the chip */
if
(
!
full_init
)
{
e100_sw_reset
(
bdp
,
PORT_SELECTIVE_RESET
);
if
(
!
e100_hw_reset_recover
(
bdp
,
PORT_SELECTIVE_RESET
))
{
printk
(
KERN_ERR
"e100_deisolate_driver:"
" HW reset recover failed
\n
"
);
}
}
e100_start_ru
(
bdp
);
/* relaunch watchdog timer in 2 sec */
mod_timer
(
&
(
bdp
->
watchdog_timer
),
jiffies
+
(
2
*
HZ
));
// we must clear tcbs since we may have lost Tx intrrupt
// or have unsent frames on the tcb chain
e100_tcb_add_C_bit
(
bdp
);
e100_tx_srv
(
bdp
);
e100_set_intr_mask
(
bdp
);
if
(
netif_running
(
bdp
->
device
))
netif_wake_queue
(
bdp
->
device
);
}
bdp
->
driver_isolated
=
false
;
}
#ifdef E100_ETHTOOL_IOCTL
#ifdef E100_ETHTOOL_IOCTL
static
int
static
int
e100_do_ethtool_ioctl
(
struct
net_device
*
dev
,
struct
ifreq
*
ifr
)
e100_do_ethtool_ioctl
(
struct
net_device
*
dev
,
struct
ifreq
*
ifr
)
...
@@ -3458,14 +3638,6 @@ e100_get_speed_duplex_caps(struct e100_private *bdp)
 }
 
-static void
-e100_set_speed_duplex(struct e100_private *bdp)
-{
-	e100_phy_set_speed_duplex(bdp, true);
-	e100_config_fc(bdp);	/* re-config flow-control if necessary */
-	e100_config(bdp);
-}
-
 #ifdef ETHTOOL_GWOL
 static unsigned char
 e100_setup_filter(struct e100_private *bdp)
@@ -3510,32 +3682,19 @@ e100_setup_filter(struct e100_private *bdp)
 static void
 e100_do_wol(struct pci_dev *pcid, struct e100_private *bdp)
 {
-	int enable = 0;
-	u32 state = 0;
-
-	if (bdp->wolopts) {
-		e100_config_wol(bdp);
-
-		if (!e100_config(bdp)) {
-			printk("e100_config WOL options failed\n");
-			goto exit;
-		}
-
-		if (bdp->wolopts & (WAKE_UCAST | WAKE_ARP)) {
-			if (!e100_setup_filter(bdp)) {
-				printk(KERN_ERR
-				       "e100_config WOL options failed\n");
-				goto exit;
-			}
-			state = 1;
-			pci_set_power_state(pcid, state);
-		}
-		enable = 1;
-	}
-exit:
-	pci_enable_wake(pcid, state, enable);
+	e100_config_wol(bdp);
+
+	if (e100_config(bdp)) {
+		if (bdp->wolopts & (WAKE_UCAST | WAKE_ARP)) {
+			if (!e100_setup_filter(bdp))
+				printk(KERN_ERR
+				       "e100_config WOL failed\n");
+		}
+	} else {
+		printk("e100_config WOL options failed\n");
+	}
 }
 
 static u16
 e100_get_ip_lbytes(struct net_device *dev)
 {
 	struct in_ifaddr *ifa;
@@ -3795,3 +3954,84 @@ e100_non_tx_background(unsigned long ptr)
 	}
 
 	spin_unlock_bh(&(bdp->bd_non_tx_lock));
 }
+
+#ifdef CONFIG_PM
+static int
+e100_save_state(struct pci_dev *pcid, u32 state)
+{
+	struct net_device *dev;
+	struct e100_private *bdp;
+
+	/* Actually, PCI PM does NOT call this entry */
+	if (!(dev = (struct net_device *) pci_get_drvdata(pcid)))
+		return -1;
+	bdp = dev->priv;
+	pci_save_state(pcid, bdp->pci_state);
+	return 0;
+}
+
+static int
+e100_suspend(struct pci_dev *pcid, u32 state)
+{
+	struct net_device *netdev = pci_get_drvdata(pcid);
+	struct e100_private *bdp = netdev->priv;
+
+	e100_isolate_driver(bdp);
+	e100_save_state(pcid, state);
+
+	/* If wol is enabled */
+#ifdef ETHTOOL_GWOL
+	if (bdp->wolopts) {
+		bdp->ip_lbytes = e100_get_ip_lbytes(netdev);
+		e100_do_wol(pcid, bdp);
+		pci_enable_wake(pcid, 3, 1);	/* Enable PME for power state D3 */
+		pci_set_power_state(pcid, 3);	/* Set power state to D3. */
+	} else {
+		/* Disable bus mastering */
+		pci_disable_device(pcid);
+		pci_set_power_state(pcid, state);
+	}
+#else
+	pci_disable_device(pcid);
+	pci_set_power_state(pcid, state);
+#endif
+	return 0;
+}
+
+static int
+e100_resume(struct pci_dev *pcid)
+{
+	struct net_device *netdev = pci_get_drvdata(pcid);
+	struct e100_private *bdp = netdev->priv;
+	u8 recover = false;
+	u8 full_init = false;
+
+	pci_set_power_state(pcid, 0);
+	pci_enable_wake(pcid, 0, 0);	/* Clear PME status and disable PME */
+	pci_restore_state(pcid, bdp->pci_state);
+
+	if (netif_running(netdev)) {
+		recover = true;
+	}
+#ifdef ETHTOOL_GWOL
+	if (bdp->wolopts & (WAKE_UCAST | WAKE_ARP)) {
+		full_init = true;
+	}
+#endif
+
+	e100_deisolate_driver(bdp, recover, full_init);
+
+	return 0;
+}
+
+static int
+e100_enable_wake(struct pci_dev *pcid, u32 state, int enable)
+{
+	/* Driver doesn't need to do anything because it will enable */
+	/* wol when suspended.                                       */
+	/* Actually, PCI PM does NOT call this entry.                */
+	return 0;
+}
+#endif /* CONFIG_PM */
View file @
ed492288
...
@@ -769,7 +769,7 @@ e100_set_fc(struct e100_private *bdp)
...
@@ -769,7 +769,7 @@ e100_set_fc(struct e100_private *bdp)
* Arguments: bdp - Pointer to the e100_private structure for the board
* Arguments: bdp - Pointer to the e100_private structure for the board
*
*
* Returns: true if link state was changed
* Returns: true if link state was changed
*
B_FLASE
otherwise
*
false
otherwise
*
*
*/
*/
unsigned
char
unsigned
char
...
...
drivers/net/eepro100.c

@@ -64,8 +64,8 @@ static int debug = -1;			/* The debug level */
 /* A few values that may be tweaked. */
 /* The ring sizes should be a power of two for efficiency. */
-#define TX_RING_SIZE	32
-#define RX_RING_SIZE	32
+#define TX_RING_SIZE	64
+#define RX_RING_SIZE	64
 /* How much slots multicast filter setup may take.
    Do not descrease without changing set_rx_mode() implementaion. */
 #define TX_MULTICAST_SIZE   2
@@ -570,6 +570,19 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
 	if (speedo_debug > 0  &&  did_version++ == 0)
 		printk(version);
 
+	/* save power state before pci_enable_device overwrites it */
+	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm) {
+		u16 pwr_command;
+		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
+		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+	}
+
+	if (pci_enable_device(pdev))
+		goto err_out_free_mmio_region;
+
+	pci_set_master(pdev);
+
 	if (!request_region(pci_resource_start(pdev, 1),
 			pci_resource_len(pdev, 1), "eepro100")) {
 		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
@@ -600,18 +613,6 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev,
 		   pci_resource_start(pdev, 0), irq);
 #endif
 
-	/* save power state b4 pci_enable_device overwrites it */
-	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pm) {
-		u16 pwr_command;
-		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
-		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
-	}
-
-	if (pci_enable_device(pdev))
-		goto err_out_free_mmio_region;
-
-	pci_set_master(pdev);
-
 	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
 		cards_found++;
@@ -1074,6 +1075,51 @@ static void speedo_resume(struct net_device *dev)
 	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
 }
 
+/*
+ * Sometimes the receiver stops making progress.  This routine knows how to
+ * get it going again, without losing packets or being otherwise nasty like
+ * a chip reset would be.  Previously the driver had a whole sequence
+ * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
+ * do another, etc.  But those things don't really matter.  Separate logic
+ * in the ISR provides for allocating buffers--the other half of operation
+ * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
+ * This problem with the old, more involved algorithm is shown up under
+ * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
+ */
+static void
+speedo_rx_soft_reset(struct net_device *dev)
+{
+	struct speedo_private *sp = dev->priv;
+	struct RxFD *rfd;
+	long ioaddr;
+
+	ioaddr = dev->base_addr;
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	if (inb(ioaddr + SCBCmd) != 0) {
+		printk("%s: previous command stalled\n", dev->name);
+		return;
+	}
+	/*
+	* Put the hardware into a known state.
+	*/
+	outb(RxAbort, ioaddr + SCBCmd);
+
+	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+
+	rfd->rx_buf_addr = 0xffffffff;
+
+	wait_for_cmd_done(ioaddr + SCBCmd);
+
+	if (inb(ioaddr + SCBCmd) != 0) {
+		printk("%s: RxAbort command stalled\n", dev->name);
+		return;
+	}
+	outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+		ioaddr + SCBPointer);
+	outb(RxStart, ioaddr + SCBCmd);
+}
+
 /* Media monitoring and control. */
 static void speedo_timer(unsigned long data)
 {
@@ -1377,9 +1423,10 @@ speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* workaround for hardware bug on 10 mbit half duplex */
 
-		if ((sp->partner == 0) || (sp->chip_id == 1)) {
+		if ((sp->partner == 0) && (sp->chip_id == 1)) {
 			wait_for_cmd_done(ioaddr + SCBCmd);
 			outb(0, ioaddr + SCBCmd);
+			udelay(1);
 		}
 
 		/* Trigger the command unit resume. */
@@ -1507,82 +1554,39 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 		if ((status & 0xfc00) == 0)
 			break;
 
-		/* Always check if all rx buffers are allocated.  --SAW */
-		speedo_refill_rx_buffers(dev, 0);
-
 		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
 			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
 									/* Need to gather the postponed packet. */
 			speedo_rx(dev);
 
-		if (status & 0x1000) {
-			spin_lock(&sp->lock);
-			if ((status & 0x003c) == 0x0028) {	/* No more Rx buffers. */
-				struct RxFD *rxf;
-				printk(KERN_WARNING "%s: card reports no RX buffers.\n",
-						dev->name);
-				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
-				if (rxf == NULL) {
-					if (speedo_debug > 2)
-						printk(KERN_DEBUG
-								"%s: NULL cur_rx in speedo_interrupt().\n",
-								dev->name);
-					sp->rx_ring_state |= RrNoMem|RrNoResources;
-				} else if (rxf == sp->last_rxf) {
-					if (speedo_debug > 2)
-						printk(KERN_DEBUG
-								"%s: cur_rx is last in speedo_interrupt().\n",
-								dev->name);
-					sp->rx_ring_state |= RrNoMem|RrNoResources;
-				} else
-					outb(RxResumeNoResources, ioaddr + SCBCmd);
-			} else if ((status & 0x003c) == 0x0008) { /* No resources. */
-				struct RxFD *rxf;
-				printk(KERN_WARNING "%s: card reports no resources.\n",
-						dev->name);
-				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
-				if (rxf == NULL) {
-					if (speedo_debug > 2)
-						printk(KERN_DEBUG
-								"%s: NULL cur_rx in speedo_interrupt().\n",
-								dev->name);
-					sp->rx_ring_state |= RrNoMem|RrNoResources;
-				} else if (rxf == sp->last_rxf) {
-					if (speedo_debug > 2)
-						printk(KERN_DEBUG
-								"%s: cur_rx is last in speedo_interrupt().\n",
-								dev->name);
-					sp->rx_ring_state |= RrNoMem|RrNoResources;
-				} else {
-					/* Restart the receiver. */
-					outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
-						 ioaddr + SCBPointer);
-					outb(RxStart, ioaddr + SCBCmd);
-				}
-			}
-			sp->stats.rx_errors++;
-			spin_unlock(&sp->lock);
-		}
-
-		if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {
-			printk(KERN_WARNING
-					"%s: restart the receiver after a possible hang.\n",
-					dev->name);
-			spin_lock(&sp->lock);
-			/* Restart the receiver.
-			   I'm not sure if it's always right to restart the receiver
-			   here but I don't know another way to prevent receiver hangs.
-			   1999/12/25 SAW */
-			outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
-				 ioaddr + SCBPointer);
-			outb(RxStart, ioaddr + SCBCmd);
-			sp->rx_ring_state &= ~RrNoResources;
-			spin_unlock(&sp->lock);
-		}
+		/* Always check if all rx buffers are allocated.  --SAW */
+		speedo_refill_rx_buffers(dev, 0);
+
+		spin_lock(&sp->lock);
+		/*
+		 * The chip may have suspended reception for various reasons.
+		 * Check for that, and re-prime it should this be the case.
+		 */
+		switch ((status >> 2) & 0xf) {
+		case 0: /* Idle */
+			break;
+		case 1:	/* Suspended */
+		case 2:	/* No resources (RxFDs) */
+		case 9:	/* Suspended with no more RBDs */
+		case 10: /* No resources due to no RBDs */
+		case 12: /* Ready with no RBDs */
+			speedo_rx_soft_reset(dev);
+			break;
+		case 3:  case 5:  case 6:  case 7:  case 8:
+		case 11:  case 13:  case 14:  case 15:
+			/* these are all reserved values */
+			break;
+		}
 
 		/* User interrupt, Command/Tx unit interrupt or CU not active. */
 		if (status & 0xA400) {
-			spin_lock(&sp->lock);
 			speedo_tx_buffer_gc(dev);
 			if (sp->tx_full
 				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
@@ -1590,8 +1594,9 @@ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 				sp->tx_full = 0;
 				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
 			}
-			spin_unlock(&sp->lock);
 		}
+
+		spin_unlock(&sp->lock);
 
 		if (--boguscnt < 0) {
 			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
@@ -2202,6 +2207,8 @@ static int eepro100_suspend(struct pci_dev *pdev, u32 state)
 	if (!netif_running(dev))
 		return 0;
 
+	del_timer_sync(&sp->timer);
+
 	netif_device_detach(dev);
 	outl(PortPartialReset, ioaddr + SCBPort);
@@ -2234,6 +2241,8 @@ static int eepro100_resume(struct pci_dev *pdev)
 	sp->rx_mode = -1;
 	sp->flow_ctrl = sp->partner = 0;
 	set_rx_mode(dev);
+	sp->timer.expires = RUN_AT(2*HZ);
+	add_timer(&sp->timer);
 	return 0;
 }
 #endif /* CONFIG_PM */
drivers/net/tg3.c

-/* $Id: tg3.c,v 1.43.2.74 2002/03/06 22:22:29 davem Exp $
+/* $Id: tg3.c,v 1.43.2.79 2002/03/12 07:11:17 davem Exp $
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
@@ -42,7 +42,11 @@
 */
 
 #define TG3_MINI_RING_WORKS	0
 
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define TG3_VLAN_TAG_USED 1
+#else
 #define TG3_VLAN_TAG_USED 0
+#endif
 
 #include "tg3.h"
@@ -78,15 +82,16 @@
 * them in the NIC onboard memory.
 */
 #define TG3_RX_RING_SIZE		512
-#define TG3_RX_RING_PENDING		200
+#define TG3_DEF_RX_RING_PENDING		200
 #if TG3_MINI_RING_WORKS
 #define TG3_RX_MINI_RING_SIZE		256 /* ??? */
-#define TG3_RX_MINI_RING_PENDING	100
+#define TG3_DEF_RX_MINI_RING_PENDING	100
 #endif
 #define TG3_RX_JUMBO_RING_SIZE		256
-#define TG3_RX_JUMBO_RING_PENDING	100
+#define TG3_DEF_RX_JUMBO_RING_PENDING	100
 #define TG3_RX_RCB_RING_SIZE		1024
 #define TG3_TX_RING_SIZE		512
+#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
 
 #define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
@@ -100,10 +105,12 @@
				 TG3_RX_RCB_RING_SIZE)
 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
+#define TX_RING_GAP(TP)	\
+	(TG3_TX_RING_SIZE - (TP)->tx_pending)
 #define TX_BUFFS_AVAIL(TP)						\
	(((TP)->tx_cons <= (TP)->tx_prod) ?				\
-	  (TP)->tx_cons + (TG3_TX_RING_SIZE - 1) - (TP)->tx_prod :	\
-	  (TP)->tx_cons - (TP)->tx_prod - 1)
+	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :		\
+	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
 #define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
@@ -1346,6 +1353,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp)
 	aninfo.flags |= (MR_AN_ENABLE);
 
 	for (i = 0; i < 6; i++) {
+		unsigned int tick;
 		u32 tmp;
 
 		tw32(MAC_TX_AUTO_NEG, 0);
@@ -1358,7 +1366,8 @@ static int tg3_setup_fiber_phy(struct tg3 *tp)
 		aninfo.state = ANEG_STATE_UNKNOWN;
 		aninfo.cur_time = 0;
-		while (aninfo.cur_time < 95000) {
+		tick = 0;
+		while (++tick < 95000) {
 			status = tg3_fiber_aneg_smachine(tp, &aninfo);
 			if (status == ANEG_DONE ||
 			    status == ANEG_FAILED)
@@ -1782,7 +1791,7 @@ static void tg3_rx(struct tg3 *tp)
 			skb = copy_skb;
 		}
 
-		if (!(tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) &&
+		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM)) {
 			skb->csum = htons((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 					  >> RXD_TCPCSUM_SHIFT);
@@ -1835,61 +1844,99 @@ static void tg3_rx(struct tg3 *tp)
...
@@ -1835,61 +1844,99 @@ static void tg3_rx(struct tg3 *tp)
#endif
#endif
}
}
#define RATE_SAMPLE_INTERVAL (1 * HZ)
#define PKT_RATE_LOW 22000
#define PKT_RATE_LOW 22000
#define PKT_RATE_HIGH 61000
#define PKT_RATE_HIGH 61000
static
void
tg3_rate_sample
(
struct
tg3
*
tp
,
unsigned
long
ticks
)
static
void
tg3_rate_sample
(
struct
tg3
*
tp
,
unsigned
long
ticks
)
{
{
u32
delta
,
rx_now
,
tx_now
;
u32
delta
,
rx_now
,
tx_now
;
int
new_vals
;
int
new_vals
,
do_tx
,
do_rx
;
rx_now
=
tp
->
hw_stats
->
rx_ucast_packets
.
low
;
rx_now
=
tp
->
hw_stats
->
rx_ucast_packets
.
low
;
tx_now
=
tp
->
hw_stats
->
COS_out_packets
[
0
].
low
;
tx_now
=
tp
->
hw_stats
->
COS_out_packets
[
0
].
low
;
delta
=
(
rx_now
-
tp
->
last_rx_count
);
delta
=
(
rx_now
-
tp
->
last_rx_count
);
delta
+=
(
tx_now
-
tp
->
last_tx_count
);
delta
+=
(
tx_now
-
tp
->
last_tx_count
);
delta
/=
(
ticks
/
RATE_SAMPLE_INTERVAL
);
delta
/=
(
ticks
/
tp
->
coalesce_config
.
rate_sample_jiffies
);
tp
->
last_rx_count
=
rx_now
;
tp
->
last_rx_count
=
rx_now
;
tp
->
last_tx_count
=
tx_now
;
tp
->
+	last_tx_count = tx_now;
 	new_vals = 0;
-	if (delta < PKT_RATE_LOW) {
-		if (tp->coalesce_config.rx_max_coalesced_frames !=
-		    LOW_RXMAX_FRAMES) {
+	do_tx = (tp->tg3_flags & TG3_FLAG_ADAPTIVE_TX) != 0;
+	do_rx = (tp->tg3_flags & TG3_FLAG_ADAPTIVE_RX) != 0;
+	if (delta < tp->coalesce_config.pkt_rate_low) {
+		if (do_rx &&
+		    tp->coalesce_config.rx_max_coalesced_frames !=
+		    tp->coalesce_config.rx_max_coalesced_frames_low) {
 			tp->coalesce_config.rx_max_coalesced_frames =
 				LOW_RXMAX_FRAMES;
 			tp->coalesce_config.rx_coalesce_ticks =
 				LOW_RXCOL_TICKS;
 			new_vals = 1;
 		}
-	} else if (delta < PKT_RATE_HIGH) {
-		if (tp->coalesce_config.rx_max_coalesced_frames !=
-		    DEFAULT_RXMAX_FRAMES) {
+		if (do_tx &&
+		    tp->coalesce_config.tx_max_coalesced_frames !=
+		    tp->coalesce_config.tx_max_coalesced_frames_low) {
+			tp->coalesce_config.tx_max_coalesced_frames =
+				tp->coalesce_config.tx_max_coalesced_frames_low;
+			tp->coalesce_config.tx_coalesce_ticks =
+				tp->coalesce_config.tx_coalesce_ticks_low;
+			new_vals = 1;
+		}
+	} else if (delta < tp->coalesce_config.pkt_rate_high) {
+		if (do_rx &&
+		    tp->coalesce_config.rx_max_coalesced_frames !=
+		    tp->coalesce_config.rx_max_coalesced_frames_def) {
 			tp->coalesce_config.rx_max_coalesced_frames =
-				DEFAULT_RXMAX_FRAMES;
+				tp->coalesce_config.rx_max_coalesced_frames_def;
 			tp->coalesce_config.rx_coalesce_ticks =
-				DEFAULT_RXCOL_TICKS;
+				tp->coalesce_config.rx_coalesce_ticks_def;
 			new_vals = 1;
 		}
+		if (do_tx &&
+		    tp->coalesce_config.tx_max_coalesced_frames !=
+		    tp->coalesce_config.tx_max_coalesced_frames_def) {
+			tp->coalesce_config.tx_max_coalesced_frames =
+				tp->coalesce_config.tx_max_coalesced_frames_def;
+			tp->coalesce_config.tx_coalesce_ticks =
+				tp->coalesce_config.tx_coalesce_ticks_def;
+			new_vals = 1;
+		}
 	} else {
-		if (tp->coalesce_config.rx_max_coalesced_frames !=
-		    HIGH_RXMAX_FRAMES) {
+		if (do_rx &&
+		    tp->coalesce_config.rx_max_coalesced_frames !=
+		    tp->coalesce_config.rx_max_coalesced_frames_high) {
 			tp->coalesce_config.rx_max_coalesced_frames =
-				HIGH_RXMAX_FRAMES;
+				tp->coalesce_config.rx_max_coalesced_frames_high;
 			tp->coalesce_config.rx_coalesce_ticks =
-				HIGH_RXCOL_TICKS;
+				tp->coalesce_config.rx_coalesce_ticks_high;
 			new_vals = 1;
 		}
+		if (do_tx &&
+		    tp->coalesce_config.tx_max_coalesced_frames !=
+		    tp->coalesce_config.tx_max_coalesced_frames_high) {
+			tp->coalesce_config.tx_max_coalesced_frames =
+				tp->coalesce_config.tx_max_coalesced_frames_high;
+			tp->coalesce_config.tx_coalesce_ticks =
+				tp->coalesce_config.tx_coalesce_ticks_high;
+			new_vals = 1;
+		}
 	}
 
 	if (new_vals) {
-		tw32(HOSTCC_RXCOL_TICKS,
-		     tp->coalesce_config.rx_coalesce_ticks);
-		tw32(HOSTCC_RXMAX_FRAMES,
-		     tp->coalesce_config.rx_max_coalesced_frames);
+		if (do_rx) {
+			tw32(HOSTCC_RXCOL_TICKS,
+			     tp->coalesce_config.rx_coalesce_ticks);
+			tw32(HOSTCC_RXMAX_FRAMES,
+			     tp->coalesce_config.rx_max_coalesced_frames);
+		}
+		if (do_tx) {
+			tw32(HOSTCC_TXCOL_TICKS,
+			     tp->coalesce_config.tx_coalesce_ticks);
+			tw32(HOSTCC_TXMAX_FRAMES,
+			     tp->coalesce_config.tx_max_coalesced_frames);
+		}
 	}
 
 	tp->last_rate_sample = jiffies;
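Note (not part of the commit): the hunk above is the heart of the adaptive scheme — each sampling interval the measured packet rate picks one of three coalescing bands. A minimal stand-alone sketch of that banding idea follows; the struct and function names are invented for illustration, the frame counts mirror the LOW/DEFAULT/HIGH_RXMAX_FRAMES values in tg3.h, the "low" tick count is a stand-in, and the 22000/61000 thresholds are the defaults this patch installs in tg3_init_coalesce_config().

    #include <stdio.h>

    /* Illustrative only: mirrors the low/default/high band selection above. */
    struct coal_band { unsigned int ticks, max_frames; };

    static struct coal_band pick_band(unsigned long pkts_per_sec)
    {
    	static const struct coal_band low  = {  18,  5 };	/* light load (ticks value assumed) */
    	static const struct coal_band def  = {  72,  8 };	/* DEFAULT_RXCOL_TICKS / DEFAULT_RXMAX_FRAMES */
    	static const struct coal_band high = { 150, 18 };	/* HIGH_RXCOL_TICKS / HIGH_RXMAX_FRAMES */

    	if (pkts_per_sec < 22000)	/* pkt_rate_low default  */
    		return low;
    	if (pkts_per_sec < 61000)	/* pkt_rate_high default */
    		return def;
    	return high;
    }

    int main(void)
    {
    	unsigned long rates[] = { 1000, 40000, 90000 };
    	int i;

    	for (i = 0; i < 3; i++) {
    		struct coal_band b = pick_band(rates[i]);
    		printf("%lu pkt/s -> %u ticks, %u frames\n",
    		       rates[i], b.ticks, b.max_frames);
    	}
    	return 0;
    }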
@@ -1919,10 +1966,11 @@ static void tg3_interrupt_main_work(struct tg3 *tp)
 		did_pkts = 1;
 	}
 
-	if (did_pkts) {
+	if (did_pkts &&
+	    (tp->tg3_flags & (TG3_FLAG_ADAPTIVE_RX | TG3_FLAG_ADAPTIVE_TX))) {
 		unsigned long ticks = jiffies - tp->last_rate_sample;
 
-		if (ticks >= RATE_SAMPLE_INTERVAL)
+		if (ticks >= tp->coalesce_config.rate_sample_jiffies)
 			tg3_rate_sample(tp, ticks);
 	}
 }
@@ -2590,14 +2638,14 @@ static void tg3_init_rings(struct tg3 *tp)
 	}
 
 	/* Now allocate fresh SKBs for each rx ring. */
-	for (i = 0; i < TG3_RX_RING_PENDING; i++) {
+	for (i = 0; i < tp->rx_pending; i++) {
 		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
 				     -1, i) < 0)
 			break;
 	}
 
 #if TG3_MINI_RING_WORKS
-	for (i = 0; i < TG3_RX_MINI_RING_PENDING; i++) {
+	for (i = 0; i < tp->rx_mini_pending; i++) {
 		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_MINI,
 				     -1, i) < 0)
 			break;
@@ -2605,7 +2653,7 @@ static void tg3_init_rings(struct tg3 *tp)
 #endif
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
-		for (i = 0; i < TG3_RX_JUMBO_RING_PENDING; i++) {
+		for (i = 0; i < tp->rx_jumbo_pending; i++) {
 			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
 					     -1, i) < 0)
 				break;
@@ -3420,11 +3468,11 @@ static int tg3_reset_hw(struct tg3 *tp)
 	}
 
 	/* Setup replenish thresholds. */
-	tw32(RCVBDI_STD_THRESH, TG3_RX_RING_PENDING / 8);
+	tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
 #if TG3_MINI_RING_WORKS
-	tw32(RCVBDI_MINI_THRESH, TG3_RX_MINI_RING_PENDING / 8);
+	tw32(RCVBDI_MINI_THRESH, tp->rx_mini_pending / 8);
 #endif
-	tw32(RCVBDI_JUMBO_THRESH, TG3_RX_JUMBO_RING_PENDING / 8);
+	tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
 
 	/* Clear out send RCB ring in SRAM. */
 	for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
@@ -3462,17 +3510,17 @@ static int tg3_reset_hw(struct tg3 *tp)
 			BDINFO_FLAGS_MAXLEN_SHIFT),
 		       0);
 
-	tp->rx_std_ptr = TG3_RX_RING_PENDING;
+	tp->rx_std_ptr = tp->rx_pending;
 	tw32_mailbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
 		     tp->rx_std_ptr);
 
 #if TG3_MINI_RING_WORKS
-	tp->rx_mini_ptr = TG3_RX_MINI_RING_PENDING;
+	tp->rx_mini_ptr = tp->rx_mini_pending;
 	tw32_mailbox(MAILBOX_RCV_MINI_PROD_IDX + TG3_64BIT_REG_LOW,
 		     tp->rx_mini_ptr);
 #endif
 
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)
-		tp->rx_jumbo_ptr = TG3_RX_JUMBO_RING_PENDING;
+		tp->rx_jumbo_ptr = tp->rx_jumbo_pending;
 	else
 		tp->rx_jumbo_ptr = 0;
 	tw32_mailbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
@@ -3608,6 +3656,9 @@ static int tg3_reset_hw(struct tg3 *tp)
 	udelay(10);
 	tw32(MAC_RX_MODE, tp->rx_mode);
 
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+		tw32(MAC_SERDES_CFG, 0x616000);
+
 	err = tg3_setup_phy(tp);
 	if (err)
 		return err;
@@ -4050,6 +4101,8 @@ static int tg3_open(struct net_device *dev)
 }
 #endif
 
+static struct net_device_stats *tg3_get_stats(struct net_device *);
+
 static int tg3_close(struct net_device *dev)
 {
 	struct tg3 *tp = dev->priv;
@@ -4073,6 +4126,9 @@ static int tg3_close(struct net_device *dev)
 
 	free_irq(dev->irq, dev);
 
+	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
+	       sizeof(tp->net_stats_prev));
+
 	tg3_free_consistent(tp);
 
 	return 0;
@@ -4121,46 +4177,53 @@ static struct net_device_stats *tg3_get_stats(struct net_device *dev)
 {
 	struct tg3 *tp = dev->priv;
 	struct net_device_stats *stats = &tp->net_stats;
+	struct net_device_stats *old_stats = &tp->net_stats_prev;
 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
-	/* XXX Fix this... this is wrong because
-	 * XXX it means every open/close we lose the stats.
-	 */
 	if (!hw_stats)
-		return stats;
+		return old_stats;
 
-	stats->rx_packets =
+	stats->rx_packets = old_stats->rx_packets +
 		get_stat64(&hw_stats->rx_ucast_packets) +
 		get_stat64(&hw_stats->rx_mcast_packets) +
 		get_stat64(&hw_stats->rx_bcast_packets);
-	stats->tx_packets =
+	stats->tx_packets = old_stats->tx_packets +
 		get_stat64(&hw_stats->COS_out_packets[0]);
 
-	stats->rx_bytes = get_stat64(&hw_stats->rx_octets);
-	stats->tx_bytes = get_stat64(&hw_stats->tx_octets);
+	stats->rx_bytes = old_stats->rx_bytes +
+		get_stat64(&hw_stats->rx_octets);
+	stats->tx_bytes = old_stats->tx_bytes +
+		get_stat64(&hw_stats->tx_octets);
 
-	stats->rx_errors = get_stat64(&hw_stats->rx_errors);
-	stats->tx_errors =
+	stats->rx_errors = old_stats->rx_errors +
+		get_stat64(&hw_stats->rx_errors);
+	stats->tx_errors = old_stats->tx_errors +
 		get_stat64(&hw_stats->tx_errors) +
 		get_stat64(&hw_stats->tx_mac_errors) +
 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
 		get_stat64(&hw_stats->tx_discards);
 
-	stats->multicast = get_stat64(&hw_stats->rx_mcast_packets);
-	stats->collisions = get_stat64(&hw_stats->tx_collisions);
+	stats->multicast = old_stats->multicast +
+		get_stat64(&hw_stats->rx_mcast_packets);
+	stats->collisions = old_stats->collisions +
+		get_stat64(&hw_stats->tx_collisions);
 
-	stats->rx_length_errors =
+	stats->rx_length_errors = old_stats->rx_length_errors +
 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
 		get_stat64(&hw_stats->rx_undersize_packets);
 
-	stats->rx_over_errors = get_stat64(&hw_stats->rxbds_empty);
-	stats->rx_frame_errors = get_stat64(&hw_stats->rx_align_errors);
-	stats->tx_aborted_errors = get_stat64(&hw_stats->tx_discards);
-	stats->tx_carrier_errors =
+	stats->rx_over_errors = old_stats->rx_over_errors +
+		get_stat64(&hw_stats->rxbds_empty);
+	stats->rx_frame_errors = old_stats->rx_frame_errors +
+		get_stat64(&hw_stats->rx_align_errors);
+	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+		get_stat64(&hw_stats->tx_discards);
+	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
 		get_stat64(&hw_stats->tx_carrier_sense_errors);
 
-	stats->rx_crc_errors = calc_crc_errors(tp);
+	stats->rx_crc_errors = old_stats->rx_crc_errors +
+		calc_crc_errors(tp);
 
 	return stats;
 }
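Note (not part of the commit): tg3_close() now folds the live counters into net_stats_prev, and tg3_get_stats() adds the NIC's hardware counters on top of that snapshot, so an interface down/up no longer zeroes the statistics. A toy sketch of the same accumulate-across-reset pattern, with made-up names:

    /* Illustration only: the accumulation pattern used by the hunks above. */
    struct counters { unsigned long rx_packets, tx_packets; };

    static struct counters totals_prev;	/* plays the role of tp->net_stats_prev */
    static struct counters hw_live;	/* plays the role of the NIC's hw_stats  */

    static struct counters read_stats(void)
    {
    	struct counters cur;

    	cur.rx_packets = totals_prev.rx_packets + hw_live.rx_packets;
    	cur.tx_packets = totals_prev.tx_packets + hw_live.tx_packets;
    	return cur;
    }

    static void device_close(void)
    {
    	/* Save the running totals before the hardware counters are lost,
    	 * as tg3_close() now does with its memcpy() into net_stats_prev. */
    	totals_prev = read_stats();
    	hw_live.rx_packets = hw_live.tx_packets = 0;	/* chip reset */
    }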
@@ -4255,16 +4318,18 @@ static void tg3_set_rx_mode(struct net_device *dev)
 	spin_unlock_irq(&tp->lock);
 }
 
+#define TG3_REGDUMP_LEN		(32 * 1024)
+
 static u8 *tg3_get_regs(struct tg3 *tp)
 {
-	u8 *orig_p = kmalloc((32 * 1024), GFP_KERNEL);
+	u8 *orig_p = kmalloc(TG3_REGDUMP_LEN, GFP_KERNEL);
 	u8 *p;
 	int i;
 
 	if (orig_p == NULL)
 		return NULL;
 
-	memset(orig_p, 0, (32 * 1024));
+	memset(orig_p, 0, TG3_REGDUMP_LEN);
 
 	spin_lock_irq(&tp->lock);
@@ -4320,6 +4385,177 @@ do { p = orig_p + (reg); \
 	return orig_p;
 }
 
+static void tg3_to_ethtool_coal(struct tg3 *tp,
+				struct ethtool_coalesce *ecoal)
+{
+	ecoal->rx_coalesce_usecs =
+		tp->coalesce_config.rx_coalesce_ticks_def;
+	ecoal->rx_max_coalesced_frames =
+		tp->coalesce_config.rx_max_coalesced_frames_def;
+	ecoal->rx_coalesce_usecs_irq =
+		tp->coalesce_config.rx_coalesce_ticks_during_int_def;
+	ecoal->rx_max_coalesced_frames_irq =
+		tp->coalesce_config.rx_max_coalesced_frames_during_int_def;
+	ecoal->tx_coalesce_usecs =
+		tp->coalesce_config.tx_coalesce_ticks_def;
+	ecoal->tx_max_coalesced_frames =
+		tp->coalesce_config.tx_max_coalesced_frames_def;
+	ecoal->tx_coalesce_usecs_irq =
+		tp->coalesce_config.tx_coalesce_ticks_during_int_def;
+	ecoal->tx_max_coalesced_frames_irq =
+		tp->coalesce_config.tx_max_coalesced_frames_during_int_def;
+	ecoal->stats_block_coalesce_usecs =
+		tp->coalesce_config.stats_coalesce_ticks_def;
+
+	ecoal->use_adaptive_rx_coalesce =
+		(tp->tg3_flags & TG3_FLAG_ADAPTIVE_RX) != 0;
+	ecoal->use_adaptive_tx_coalesce =
+		(tp->tg3_flags & TG3_FLAG_ADAPTIVE_TX) != 0;
+
+	ecoal->pkt_rate_low = tp->coalesce_config.pkt_rate_low;
+	ecoal->rx_coalesce_usecs_low =
+		tp->coalesce_config.rx_coalesce_ticks_low;
+	ecoal->rx_max_coalesced_frames_low =
+		tp->coalesce_config.rx_max_coalesced_frames_low;
+	ecoal->tx_coalesce_usecs_low =
+		tp->coalesce_config.tx_coalesce_ticks_low;
+	ecoal->tx_max_coalesced_frames_low =
+		tp->coalesce_config.tx_max_coalesced_frames_low;
+
+	ecoal->pkt_rate_high = tp->coalesce_config.pkt_rate_high;
+	ecoal->rx_coalesce_usecs_high =
+		tp->coalesce_config.rx_coalesce_ticks_high;
+	ecoal->rx_max_coalesced_frames_high =
+		tp->coalesce_config.rx_max_coalesced_frames_high;
+	ecoal->tx_coalesce_usecs_high =
+		tp->coalesce_config.tx_coalesce_ticks_high;
+	ecoal->tx_max_coalesced_frames_high =
+		tp->coalesce_config.tx_max_coalesced_frames_high;
+
+	ecoal->rate_sample_interval =
+		tp->coalesce_config.rate_sample_jiffies / HZ;
+}
+
+static int tg3_from_ethtool_coal(struct tg3 *tp,
+				 struct ethtool_coalesce *ecoal)
+{
+	/* Make sure we are not getting garbage. */
+	if ((ecoal->rx_coalesce_usecs == 0 &&
+	     ecoal->rx_max_coalesced_frames == 0) ||
+	    (ecoal->tx_coalesce_usecs == 0 &&
+	     ecoal->tx_max_coalesced_frames == 0) ||
+	    ecoal->stats_block_coalesce_usecs == 0)
+		return -EINVAL;
+	if (ecoal->use_adaptive_rx_coalesce ||
+	    ecoal->use_adaptive_tx_coalesce) {
+		if (ecoal->pkt_rate_low > ecoal->pkt_rate_high)
+			return -EINVAL;
+		if (ecoal->rate_sample_interval == 0)
+			return -EINVAL;
+		if (ecoal->use_adaptive_rx_coalesce &&
+		    ((ecoal->rx_coalesce_usecs_low == 0 &&
+		      ecoal->rx_max_coalesced_frames_low == 0) ||
+		     (ecoal->rx_coalesce_usecs_high == 0 &&
+		      ecoal->rx_max_coalesced_frames_high == 0)))
+			return -EINVAL;
+		if (ecoal->use_adaptive_tx_coalesce &&
+		    ((ecoal->tx_coalesce_usecs_low == 0 &&
+		      ecoal->tx_max_coalesced_frames_low == 0) ||
+		     (ecoal->tx_coalesce_usecs_high == 0 &&
+		      ecoal->tx_max_coalesced_frames_high == 0)))
+			return -EINVAL;
+	}
+
+	/* Looks good, let it rip. */
+	spin_lock_irq(&tp->lock);
+	tp->coalesce_config.rx_coalesce_ticks =
+		tp->coalesce_config.rx_coalesce_ticks_def =
+		ecoal->rx_coalesce_usecs;
+	tp->coalesce_config.rx_max_coalesced_frames =
+		tp->coalesce_config.rx_max_coalesced_frames_def =
+		ecoal->rx_max_coalesced_frames;
+	tp->coalesce_config.rx_coalesce_ticks_during_int =
+		tp->coalesce_config.rx_coalesce_ticks_during_int_def =
+		ecoal->rx_coalesce_usecs_irq;
+	tp->coalesce_config.rx_max_coalesced_frames_during_int =
+		tp->coalesce_config.rx_max_coalesced_frames_during_int_def =
+		ecoal->rx_max_coalesced_frames_irq;
+	tp->coalesce_config.tx_coalesce_ticks =
+		tp->coalesce_config.tx_coalesce_ticks_def =
+		ecoal->tx_coalesce_usecs;
+	tp->coalesce_config.tx_max_coalesced_frames =
+		tp->coalesce_config.tx_max_coalesced_frames_def =
+		ecoal->tx_max_coalesced_frames;
+	tp->coalesce_config.tx_coalesce_ticks_during_int =
+		tp->coalesce_config.tx_coalesce_ticks_during_int_def =
+		ecoal->tx_coalesce_usecs_irq;
+	tp->coalesce_config.tx_max_coalesced_frames_during_int =
+		tp->coalesce_config.tx_max_coalesced_frames_during_int_def =
+		ecoal->tx_max_coalesced_frames_irq;
+	tp->coalesce_config.stats_coalesce_ticks =
+		tp->coalesce_config.stats_coalesce_ticks_def =
+		ecoal->stats_block_coalesce_usecs;
+
+	if (ecoal->use_adaptive_rx_coalesce)
+		tp->tg3_flags |= TG3_FLAG_ADAPTIVE_RX;
+	else
+		tp->tg3_flags &= ~TG3_FLAG_ADAPTIVE_RX;
+	if (ecoal->use_adaptive_tx_coalesce)
+		tp->tg3_flags |= TG3_FLAG_ADAPTIVE_TX;
+	else
+		tp->tg3_flags &= ~TG3_FLAG_ADAPTIVE_TX;
+
+	tp->coalesce_config.pkt_rate_low = ecoal->pkt_rate_low;
+	tp->coalesce_config.pkt_rate_high = ecoal->pkt_rate_high;
+	tp->coalesce_config.rate_sample_jiffies =
+		ecoal->rate_sample_interval * HZ;
+	tp->coalesce_config.rx_coalesce_ticks_low =
+		ecoal->rx_coalesce_usecs_low;
+	tp->coalesce_config.rx_max_coalesced_frames_low =
+		ecoal->rx_max_coalesced_frames_low;
+	tp->coalesce_config.tx_coalesce_ticks_low =
+		ecoal->tx_coalesce_usecs_low;
+	tp->coalesce_config.tx_max_coalesced_frames_low =
+		ecoal->tx_max_coalesced_frames_low;
+	tp->coalesce_config.rx_coalesce_ticks_high =
+		ecoal->rx_coalesce_usecs_high;
+	tp->coalesce_config.rx_max_coalesced_frames_high =
+		ecoal->rx_max_coalesced_frames_high;
+	tp->coalesce_config.tx_coalesce_ticks_high =
+		ecoal->tx_coalesce_usecs_high;
+	tp->coalesce_config.tx_max_coalesced_frames_high =
+		ecoal->tx_max_coalesced_frames_high;
+
+	tw32(HOSTCC_RXCOL_TICKS,
+	     tp->coalesce_config.rx_coalesce_ticks_def);
+	tw32(HOSTCC_RXMAX_FRAMES,
+	     tp->coalesce_config.rx_max_coalesced_frames_def);
+	tw32(HOSTCC_RXCOAL_TICK_INT,
+	     tp->coalesce_config.rx_coalesce_ticks_during_int_def);
+	tw32(HOSTCC_RXCOAL_MAXF_INT,
+	     tp->coalesce_config.rx_max_coalesced_frames_during_int_def);
+	tw32(HOSTCC_TXCOL_TICKS,
+	     tp->coalesce_config.tx_coalesce_ticks_def);
+	tw32(HOSTCC_TXMAX_FRAMES,
+	     tp->coalesce_config.tx_max_coalesced_frames_def);
+	tw32(HOSTCC_TXCOAL_TICK_INT,
+	     tp->coalesce_config.tx_coalesce_ticks_during_int_def);
+	tw32(HOSTCC_TXCOAL_MAXF_INT,
+	     tp->coalesce_config.tx_max_coalesced_frames_during_int_def);
+	tw32(HOSTCC_STAT_COAL_TICKS,
+	     tp->coalesce_config.stats_coalesce_ticks_def);
+	spin_unlock_irq(&tp->lock);
+
+	return 0;
+}
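Note (not part of the commit): with ETHTOOL_{G,S}COALESCE wired through these helpers, userspace drives them via the standard SIOCETHTOOL ioctl on any socket. A rough sketch, assuming an interface named eth0, with error handling trimmed; the ethtool.h of this era uses bare u8/u16/u32 types, hence the typedefs before the include.

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>

    typedef unsigned char  u8;	/* the 2002-era ethtool.h expects these */
    typedef unsigned short u16;
    typedef unsigned int   u32;
    #include <linux/ethtool.h>

    int main(void)
    {
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);
    	struct ifreq ifr;
    	struct ethtool_coalesce ecoal;

    	memset(&ifr, 0, sizeof(ifr));
    	strcpy(ifr.ifr_name, "eth0");		/* assumed interface name */
    	ifr.ifr_data = (caddr_t)&ecoal;

    	ecoal.cmd = ETHTOOL_GCOALESCE;		/* read the current config */
    	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
    		perror("ETHTOOL_GCOALESCE");
    		return 1;
    	}
    	printf("rx: %u usecs / %u frames, adaptive rx: %u\n",
    	       ecoal.rx_coalesce_usecs, ecoal.rx_max_coalesced_frames,
    	       ecoal.use_adaptive_rx_coalesce);

    	ecoal.cmd = ETHTOOL_SCOALESCE;		/* write it back, slightly tweaked */
    	ecoal.rx_max_coalesced_frames = 16;
    	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
    		perror("ETHTOOL_SCOALESCE");
    	return 0;
    }

Reading the current values first keeps the set request past tg3_from_ethtool_coal()'s sanity checks (it rejects all-zero usec/frame pairs and a zero stats interval).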
 static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
 {
 	struct tg3 *tp = dev->priv;
@@ -4334,7 +4570,10 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
 		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
 
 		strcpy(info.driver, DRV_MODULE_NAME);
 		strcpy(info.version, DRV_MODULE_VERSION);
+		memset(&info.fw_version, 0, sizeof(info.fw_version));
 		strcpy(info.bus_info, pci_dev->slot_name);
+		info.eedump_len = 0;
+		info.regdump_len = TG3_REGDUMP_LEN;
 		if (copy_to_user(useraddr, &info, sizeof(info)))
 			return -EFAULT;
 		return 0;
@@ -4368,8 +4607,8 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
 		cmd.phy_address = PHY_ADDR;
 		cmd.transceiver = 0;
 		cmd.autoneg = tp->link_config.autoneg;
-		cmd.maxtxpkt = tp->coalesce_config.tx_max_coalesced_frames;
-		cmd.maxrxpkt = tp->coalesce_config.rx_max_coalesced_frames;
+		cmd.maxtxpkt = tp->coalesce_config.tx_max_coalesced_frames_def;
+		cmd.maxrxpkt = tp->coalesce_config.rx_max_coalesced_frames_def;
 		if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
 			return -EFAULT;
 		return 0;
@@ -4422,9 +4661,11 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
 		}
 
 		if (cmd.maxtxpkt || cmd.maxrxpkt) {
-			tp->coalesce_config.tx_max_coalesced_frames =
+			tp->coalesce_config.tx_max_coalesced_frames_def =
+				tp->coalesce_config.tx_max_coalesced_frames =
 				cmd.maxtxpkt;
-			tp->coalesce_config.rx_max_coalesced_frames =
+			tp->coalesce_config.rx_max_coalesced_frames_def =
+				tp->coalesce_config.rx_max_coalesced_frames =
 				cmd.maxrxpkt;
 
 			/* Coalescing config bits can be updated without
@@ -4448,8 +4689,8 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
 		if (copy_from_user(&regs, useraddr, sizeof(regs)))
 			return -EFAULT;
 
-		if (regs.len > (32 * 1024))
-			regs.len = (32 * 1024);
+		if (regs.len > TG3_REGDUMP_LEN)
+			regs.len = TG3_REGDUMP_LEN;
 		regs.version = 0;
 
 		if (copy_to_user(useraddr, &regs, sizeof(regs)))
 			return -EFAULT;
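Note (not part of the commit): GDRVINFO now advertises regdump_len = TG3_REGDUMP_LEN, which tells userspace how big a buffer to hand to ETHTOOL_GREGS before the driver clamps the requested length. A sketch continuing the fd/ifr setup from the previous example (add <stdlib.h> for malloc/free); the function name is illustrative.

    static void dump_tg3_regs(int fd, struct ifreq *ifr)
    {
    	struct ethtool_drvinfo info;
    	struct ethtool_regs *regs;

    	info.cmd = ETHTOOL_GDRVINFO;
    	ifr->ifr_data = (caddr_t)&info;
    	if (ioctl(fd, SIOCETHTOOL, ifr) < 0)
    		return;
    	printf("%s %s: register dump is %u bytes\n",
    	       info.driver, info.version, info.regdump_len);

    	/* struct ethtool_regs ends in a flexible data[0] array. */
    	regs = malloc(sizeof(*regs) + info.regdump_len);
    	if (regs == NULL)
    		return;
    	regs->cmd = ETHTOOL_GREGS;
    	regs->len = info.regdump_len;	/* tg3 clamps this to TG3_REGDUMP_LEN */
    	ifr->ifr_data = (caddr_t)regs;
    	if (ioctl(fd, SIOCETHTOOL, ifr) == 0)
    		printf("read %u register bytes\n", regs->len);
    	free(regs);
    }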
@@ -4530,6 +4771,196 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
 			return -EFAULT;
 	}
+	case ETHTOOL_GCOALESCE: {
+		struct ethtool_coalesce ecoal = { ETHTOOL_GCOALESCE };
+
+		tg3_to_ethtool_coal(tp, &ecoal);
+
+		if (copy_to_user(useraddr, &ecoal, sizeof(ecoal)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_SCOALESCE: {
+		struct ethtool_coalesce ecoal;
+
+		if (copy_from_user(&ecoal, useraddr, sizeof(ecoal)))
+			return -EINVAL;
+
+		return tg3_from_ethtool_coal(tp, &ecoal);
+	}
+	case ETHTOOL_GRINGPARAM: {
+		struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
+
+		ering.rx_max_pending = TG3_RX_RING_SIZE - 1;
+#if TG3_MINI_RING_WORKS
+		ering.rx_mini_max_pending = TG3_RX_MINI_RING_SIZE - 1;
+#else
+		ering.rx_mini_max_pending = 0;
+#endif
+		ering.rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+
+		ering.rx_pending = tp->rx_pending;
+#if TG3_MINI_RING_WORKS
+		ering.rx_mini_pending = tp->rx_mini_pending;
+#else
+		ering.rx_mini_pending = 0;
+#endif
+		ering.rx_jumbo_pending = tp->rx_jumbo_pending;
+		ering.tx_pending = tp->tx_pending;
+
+		if (copy_to_user(useraddr, &ering, sizeof(ering)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_SRINGPARAM: {
+		struct ethtool_ringparam ering;
+
+		if (copy_from_user(&ering, useraddr, sizeof(ering)))
+			return -EFAULT;
+
+		if ((ering.rx_pending > TG3_RX_RING_SIZE - 1) ||
+#if TG3_MINI_RING_WORKS
+		    (ering.rx_mini_pending > TG3_RX_MINI_RING_SIZE - 1) ||
+#endif
+		    (ering.rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
+		    (ering.tx_pending > TG3_TX_RING_SIZE - 1))
+			return -EINVAL;
+
+		spin_lock_irq(&tp->lock);
+
+		tp->rx_pending = ering.rx_pending;
+#if TG3_MINI_RING_WORKS
+		tp->rx_mini_pending = ering.rx_mini_pending;
+#endif
+		tp->rx_jumbo_pending = ering.rx_jumbo_pending;
+		tp->tx_pending = ering.tx_pending;
+
+		tg3_halt(tp);
+		tg3_init_rings(tp);
+		tg3_init_hw(tp);
+		netif_wake_queue(tp->dev);
+		spin_unlock_irq(&tp->lock);
+
+		return 0;
+	}
+	case ETHTOOL_GPAUSEPARAM: {
+		struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
+
+		epause.autoneg =
+			(tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
+		epause.rx_pause =
+			(tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
+		epause.tx_pause =
+			(tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
+		if (copy_to_user(useraddr, &epause, sizeof(epause)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_SPAUSEPARAM: {
+		struct ethtool_pauseparam epause;
+
+		if (copy_from_user(&epause, useraddr, sizeof(epause)))
+			return -EFAULT;
+
+		spin_lock_irq(&tp->lock);
+		if (epause.autoneg)
+			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+		else
+			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+		if (epause.rx_pause)
+			tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
+		else
+			tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
+		if (epause.tx_pause)
+			tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
+		else
+			tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
+		tg3_halt(tp);
+		tg3_init_rings(tp);
+		tg3_init_hw(tp);
+		spin_unlock_irq(&tp->lock);
+
+		return 0;
+	}
+	case ETHTOOL_GRXCSUM: {
+		struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+		edata.data =
+			(tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_SRXCSUM: {
+		struct ethtool_value edata;
+
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+
+		if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
+			if (edata.data != 0)
+				return -EINVAL;
+			return 0;
+		}
+
+		spin_lock_irq(&tp->lock);
+		if (edata.data)
+			tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+		else
+			tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
+		spin_unlock_irq(&tp->lock);
+
+		return 0;
+	}
+	case ETHTOOL_GTXCSUM: {
+		struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+		edata.data =
+			(tp->dev->features & NETIF_F_IP_CSUM) != 0;
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_STXCSUM: {
+		struct ethtool_value edata;
+
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+
+		if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
+			if (edata.data != 0)
+				return -EINVAL;
+			return 0;
+		}
+
+		if (edata.data)
+			tp->dev->features |= NETIF_F_IP_CSUM;
+		else
+			tp->dev->features &= ~NETIF_F_IP_CSUM;
+
+		return 0;
+	}
+	case ETHTOOL_GSG: {
+		struct ethtool_value edata = { ETHTOOL_GSG };
+
+		edata.data =
+			(tp->dev->features & NETIF_F_SG) != 0;
+		if (copy_to_user(useraddr, &edata, sizeof(edata)))
+			return -EFAULT;
+		return 0;
+	}
+	case ETHTOOL_SSG: {
+		struct ethtool_value edata;
+
+		if (copy_from_user(&edata, useraddr, sizeof(edata)))
+			return -EFAULT;
+
+		if (edata.data)
+			tp->dev->features |= NETIF_F_SG;
+		else
+			tp->dev->features &= ~NETIF_F_SG;
+
+		return 0;
+	}
 	};
 
 	return -EOPNOTSUPP;
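Note (not part of the commit): the new ETHTOOL_{G,S}RINGPARAM cases expose the rx/tx ring depths added to struct tg3 below. A sketch of how userspace would shrink the standard RX ring, reusing the fd/ifr and typedefs from the coalesce example above; the helper name and the value 64 are illustrative. On success the driver halts and re-initializes the chip with the new ring sizes.

    static int set_rx_ring(int fd, struct ifreq *ifr, u32 rx_pending)
    {
    	struct ethtool_ringparam ering;

    	ering.cmd = ETHTOOL_GRINGPARAM;
    	ifr->ifr_data = (caddr_t)&ering;
    	if (ioctl(fd, SIOCETHTOOL, ifr) < 0)
    		return -1;

    	printf("rx %u/%u  jumbo %u/%u  tx %u/%u\n",
    	       ering.rx_pending, ering.rx_max_pending,
    	       ering.rx_jumbo_pending, ering.rx_jumbo_max_pending,
    	       ering.tx_pending, ering.tx_max_pending);

    	if (rx_pending > ering.rx_max_pending)	/* driver would return -EINVAL */
    		return -1;

    	ering.cmd = ETHTOOL_SRINGPARAM;
    	ering.rx_pending = rx_pending;		/* e.g. 64 */
    	return ioctl(fd, SIOCETHTOOL, ifr);
    }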
@@ -5126,10 +5557,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		/* If not using tagged status, set the *_during_int
 		 * coalesce default config values to zero.
 		 */
-		tp->coalesce_config.rx_coalesce_ticks_during_int = 0;
-		tp->coalesce_config.rx_max_coalesced_frames_during_int = 0;
-		tp->coalesce_config.tx_coalesce_ticks_during_int = 0;
-		tp->coalesce_config.tx_max_coalesced_frames_during_int = 0;
+		tp->coalesce_config.rx_coalesce_ticks_during_int_def = 0;
+		tp->coalesce_config.rx_max_coalesced_frames_during_int_def = 0;
+		tp->coalesce_config.tx_coalesce_ticks_during_int_def = 0;
+		tp->coalesce_config.tx_max_coalesced_frames_during_int_def = 0;
 	}
 
 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@@ -5566,20 +5997,57 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
 
 static void __devinit tg3_init_coalesce_config(struct tg3 *tp)
 {
-	tp->coalesce_config.rx_coalesce_ticks = DEFAULT_RXCOL_TICKS;
-	tp->coalesce_config.rx_max_coalesced_frames = DEFAULT_RXMAX_FRAMES;
-	tp->coalesce_config.rx_coalesce_ticks_during_int =
+	tp->coalesce_config.rx_coalesce_ticks_def = DEFAULT_RXCOL_TICKS;
+	tp->coalesce_config.rx_max_coalesced_frames_def = DEFAULT_RXMAX_FRAMES;
+	tp->coalesce_config.rx_coalesce_ticks_during_int_def =
 		DEFAULT_RXCOAL_TICK_INT;
-	tp->coalesce_config.rx_max_coalesced_frames_during_int =
+	tp->coalesce_config.rx_max_coalesced_frames_during_int_def =
 		DEFAULT_RXCOAL_MAXF_INT;
-	tp->coalesce_config.tx_coalesce_ticks = DEFAULT_TXCOL_TICKS;
-	tp->coalesce_config.tx_max_coalesced_frames = DEFAULT_TXMAX_FRAMES;
-	tp->coalesce_config.tx_coalesce_ticks_during_int =
+	tp->coalesce_config.tx_coalesce_ticks_def = DEFAULT_TXCOL_TICKS;
+	tp->coalesce_config.tx_max_coalesced_frames_def = DEFAULT_TXMAX_FRAMES;
+	tp->coalesce_config.tx_coalesce_ticks_during_int_def =
 		DEFAULT_TXCOAL_TICK_INT;
-	tp->coalesce_config.tx_max_coalesced_frames_during_int =
+	tp->coalesce_config.tx_max_coalesced_frames_during_int_def =
 		DEFAULT_TXCOAL_MAXF_INT;
-	tp->coalesce_config.stats_coalesce_ticks =
+	tp->coalesce_config.stats_coalesce_ticks_def =
 		DEFAULT_STAT_COAL_TICKS;
+
+	tp->coalesce_config.rx_coalesce_ticks_low = LOW_RXCOL_TICKS;
+	tp->coalesce_config.rx_max_coalesced_frames_low = LOW_RXMAX_FRAMES;
+	tp->coalesce_config.tx_coalesce_ticks_low = LOW_TXCOL_TICKS;
+	tp->coalesce_config.tx_max_coalesced_frames_low = LOW_TXMAX_FRAMES;
+
+	tp->coalesce_config.rx_coalesce_ticks_high = HIGH_RXCOL_TICKS;
+	tp->coalesce_config.rx_max_coalesced_frames_high = HIGH_RXMAX_FRAMES;
+	tp->coalesce_config.tx_coalesce_ticks_high = HIGH_TXCOL_TICKS;
+	tp->coalesce_config.tx_max_coalesced_frames_high = HIGH_TXMAX_FRAMES;
+
+	/* Active == default */
+	tp->coalesce_config.rx_coalesce_ticks =
+		tp->coalesce_config.rx_coalesce_ticks_def;
+	tp->coalesce_config.rx_max_coalesced_frames =
+		tp->coalesce_config.rx_max_coalesced_frames_def;
+	tp->coalesce_config.tx_coalesce_ticks =
+		tp->coalesce_config.tx_coalesce_ticks_def;
+	tp->coalesce_config.tx_max_coalesced_frames =
+		tp->coalesce_config.tx_max_coalesced_frames_def;
+	tp->coalesce_config.stats_coalesce_ticks =
+		tp->coalesce_config.stats_coalesce_ticks_def;
+
+	tp->coalesce_config.rate_sample_jiffies = (1 * HZ);
+	tp->coalesce_config.pkt_rate_low = 22000;
+	tp->coalesce_config.pkt_rate_high = 61000;
+
+	tp->tg3_flags |= TG3_FLAG_ADAPTIVE_RX;
+	tp->tg3_flags &= ~(TG3_FLAG_ADAPTIVE_TX);
 }
 
 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
@@ -5743,6 +6211,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
 	tg3_init_bufmgr_config(tp);
 
+	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
+#if TG3_MINI_RING_WORKS
+	tp->rx_mini_pending = TG3_DEF_RX_MINI_RING_PENDING;
+#endif
+	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
+	tp->tx_pending = TG3_DEF_TX_RING_PENDING;
+
 	dev->open = tg3_open;
 	dev->stop = tg3_close;
 	dev->get_stats = tg3_get_stats;
@@ -5777,8 +6252,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	/* Tigon3 can do ipv4 only... and some chips have buggy
 	 * checksumming.
 	 */
-	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0)
+	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
 		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
+	} else
+		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
 
 	err = register_netdev(dev);
 	if (err) {
drivers/net/tg3.h
View file @ ed492288
-/* $Id: tg3.h,v 1.37.2.30 2002/03/05 10:08:39 davem Exp $
+/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
  * tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
  *
  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
@@ -713,13 +713,17 @@
 #define  DEFAULT_RXCOL_TICKS		0x00000048
 #define  HIGH_RXCOL_TICKS		0x00000096
 #define HOSTCC_TXCOL_TICKS		0x00003c0c
+#define  LOW_TXCOL_TICKS		0x00000096
 #define  DEFAULT_TXCOL_TICKS		0x0000012c
+#define  HIGH_TXCOL_TICKS		0x00000145
 #define HOSTCC_RXMAX_FRAMES		0x00003c10
 #define  LOW_RXMAX_FRAMES		0x00000005
 #define  DEFAULT_RXMAX_FRAMES		0x00000008
 #define  HIGH_RXMAX_FRAMES		0x00000012
 #define HOSTCC_TXMAX_FRAMES		0x00003c14
+#define  LOW_TXMAX_FRAMES		0x00000035
 #define  DEFAULT_TXMAX_FRAMES		0x0000004b
+#define  HIGH_TXMAX_FRAMES		0x00000052
 #define HOSTCC_RXCOAL_TICK_INT		0x00003c18
 #define  DEFAULT_RXCOAL_TICK_INT	0x00000019
 #define HOSTCC_TXCOAL_TICK_INT		0x00003c1c
@@ -1681,17 +1685,42 @@ struct tg3_link_config {
 };
 
 struct tg3_coalesce_config {
+	/* Current settings. */
 	u32		rx_coalesce_ticks;
 	u32		rx_max_coalesced_frames;
 	u32		rx_coalesce_ticks_during_int;
 	u32		rx_max_coalesced_frames_during_int;
 	u32		tx_coalesce_ticks;
 	u32		tx_max_coalesced_frames;
 	u32		tx_coalesce_ticks_during_int;
 	u32		tx_max_coalesced_frames_during_int;
 	u32		stats_coalesce_ticks;
+
+	/* Default settings. */
+	u32		rx_coalesce_ticks_def;
+	u32		rx_max_coalesced_frames_def;
+	u32		rx_coalesce_ticks_during_int_def;
+	u32		rx_max_coalesced_frames_during_int_def;
+	u32		tx_coalesce_ticks_def;
+	u32		tx_max_coalesced_frames_def;
+	u32		tx_coalesce_ticks_during_int_def;
+	u32		tx_max_coalesced_frames_during_int_def;
+	u32		stats_coalesce_ticks_def;
+
+	/* Adaptive RX/TX coalescing parameters. */
+	u32		rate_sample_jiffies;
+	u32		pkt_rate_low;
+	u32		pkt_rate_high;
+	u32		rx_coalesce_ticks_low;
+	u32		rx_max_coalesced_frames_low;
+	u32		tx_coalesce_ticks_low;
+	u32		tx_max_coalesced_frames_low;
+	u32		rx_coalesce_ticks_high;
+	u32		rx_max_coalesced_frames_high;
+	u32		tx_coalesce_ticks_high;
+	u32		tx_max_coalesced_frames_high;
 };
 
 struct tg3_bufmgr_config {
@@ -1720,6 +1749,7 @@ struct tg3 {
 	spinlock_t			indirect_lock;
 
 	struct net_device_stats		net_stats;
+	struct net_device_stats		net_stats_prev;
 	unsigned long			phy_crc_errors;
 
 	/* Adaptive coalescing engine. */
@@ -1731,9 +1761,11 @@ struct tg3 {
 	u32				tg3_flags;
 #define TG3_FLAG_HOST_TXDS		0x00000001
 #define TG3_FLAG_TXD_MBOX_HWBUG		0x00000002
-#define TG3_FLAG_BROKEN_CHECKSUMS	0x00000004
+#define TG3_FLAG_RX_CHECKSUMS		0x00000004
 #define TG3_FLAG_USE_LINKCHG_REG	0x00000008
 #define TG3_FLAG_USE_MI_INTERRUPT	0x00000010
+#define TG3_FLAG_ADAPTIVE_RX		0x00000020
+#define TG3_FLAG_ADAPTIVE_TX		0x00000040
 #define TG3_FLAG_PHY_RESET_ON_INIT	0x00000100
 #define TG3_FLAG_PCIX_TARGET_HWBUG	0x00000200
 #define TG3_FLAG_TAGGED_IRQ_STATUS	0x00000400
@@ -1751,6 +1783,10 @@ struct tg3 {
 #define TG3_FLAG_AUTONEG_DISABLE	0x00400000
 #define TG3_FLAG_JUMBO_ENABLE		0x00800000
 #define TG3_FLAG_10_100_ONLY		0x01000000
+#define TG3_FLAG_PAUSE_AUTONEG		0x02000000
+#define TG3_FLAG_PAUSE_RX		0x04000000
+#define TG3_FLAG_PAUSE_TX		0x08000000
+#define TG3_FLAG_BROKEN_CHECKSUMS	0x10000000
 #define TG3_FLAG_INIT_COMPLETE		0x80000000
 	u32				msg_enable;
 
@@ -1764,6 +1800,13 @@ struct tg3 {
 	struct tg3_coalesce_config	coalesce_config;
 	struct tg3_bufmgr_config	bufmgr_config;
 
+	u32				rx_pending;
+#if TG3_MINI_RING_WORKS
+	u32				rx_mini_pending;
+#endif
+	u32				rx_jumbo_pending;
+	u32				tx_pending;
+
 	/* cache h/w values, often passed straight to h/w */
 	u32				rx_mode;
 	u32				tx_mode;
include/linux/ethtool.h
View file @ ed492288
@@ -72,6 +72,144 @@ struct ethtool_eeprom {
 	u32	len; /* in bytes */
 	u8	data[0];
 };
 
+/* for configuring coalescing parameters of chip */
+struct ethtool_coalesce {
+	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32	rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being services by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	rx_coalesce_usecs_irq;
+	u32	rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32	tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being services by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	tx_coalesce_usecs_irq;
+	u32	tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32	stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32	use_adaptive_rx_coalesce;
+	u32	use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32	pkt_rate_low;
+	u32	rx_coalesce_usecs_low;
+	u32	rx_max_coalesced_frames_low;
+	u32	tx_coalesce_usecs_low;
+	u32	tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate is (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32	pkt_rate_high;
+	u32	rx_coalesce_usecs_high;
+	u32	rx_max_coalesced_frames_high;
+	u32	tx_coalesce_usecs_high;
+	u32	tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32	rate_sample_interval;
+};
+
+/* for configuring RX/TX ring parameters */
+struct ethtool_ringparam {
+	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32	rx_max_pending;
+	u32	rx_mini_max_pending;
+	u32	rx_jumbo_max_pending;
+	u32	tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32	rx_pending;
+	u32	rx_mini_pending;
+	u32	rx_jumbo_pending;
+	u32	tx_pending;
+};
+
+/* for configuring link flow control parameters */
+struct ethtool_pauseparam {
+	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autonet' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32	autoneg;
+	u32	rx_pause;
+	u32	tx_pause;
+};
+
 /* CMDs currently supported */
 #define ETHTOOL_GSET		0x00000001 /* Get settings. */
 #define ETHTOOL_SSET		0x00000002 /* Set settings, privileged. */
@@ -82,9 +220,23 @@ struct ethtool_eeprom {
 #define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
 #define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
 #define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv. */
-#define ETHTOOL_GLINK		0x0000000a /* Get link status */
+#define ETHTOOL_GLINK		0x0000000a /* Get link status (ethtool_value) */
 #define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
 #define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
+#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
+#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config */
+#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
+#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters */
+#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
+#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters */
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
+					    * (ethtool_value) */
+#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
+					    * (ethtool_value) */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
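Note (not part of the commit): the ethtool_value commands (GRXCSUM/SRXCSUM, GTXCSUM/STXCSUM, GSG/SSG, and now GLINK) all share one tiny struct, and the pause settings ride on ethtool_pauseparam, so small userspace helpers cover them. A sketch with the same caveats and fd/ifr/typedef setup as the earlier coalesce example; the helper names are invented.

    static int ethtool_value_get(int fd, struct ifreq *ifr, u32 cmd, u32 *val)
    {
    	struct ethtool_value ev;

    	ev.cmd = cmd;			/* e.g. ETHTOOL_GRXCSUM or ETHTOOL_GLINK */
    	ifr->ifr_data = (caddr_t)&ev;
    	if (ioctl(fd, SIOCETHTOOL, ifr) < 0)
    		return -1;
    	*val = ev.data;
    	return 0;
    }

    static int pause_enable_rx_tx(int fd, struct ifreq *ifr)
    {
    	struct ethtool_pauseparam ep;

    	ep.cmd = ETHTOOL_GPAUSEPARAM;
    	ifr->ifr_data = (caddr_t)&ep;
    	if (ioctl(fd, SIOCETHTOOL, ifr) < 0)
    		return -1;

    	ep.cmd = ETHTOOL_SPAUSEPARAM;	/* tg3 halts and re-inits the chip on this */
    	ep.rx_pause = 1;
    	ep.tx_pause = 1;
    	return ioctl(fd, SIOCETHTOOL, ifr);
    }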