Commit 97ba0eb6, authored Oct 13, 2011 by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Parents: 3ceca749 a28dc43f

Showing 12 changed files with 324 additions and 84 deletions
  drivers/net/ethernet/intel/igb/e1000_82575.c      +26  -12
  drivers/net/ethernet/intel/igb/e1000_defines.h     +1   -0
  drivers/net/ethernet/intel/igb/e1000_regs.h        +1   -0
  drivers/net/ethernet/intel/igb/igb.h               +1   -0
  drivers/net/ethernet/intel/igb/igb_main.c         +54  -58
  drivers/net/ethernet/intel/ixgbe/ixgbe.h           +4   -0
  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c   +10   -2
  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   +2   -0
  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c     +33  -11
  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h      +2   -0
  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    +188   -1
  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h      +2   -0
drivers/net/ethernet/intel/igb/e1000_82575.c

@@ -66,10 +66,6 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
 static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
 static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
 static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32  igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset);
-static s32  igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset);
 static s32  igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
 static s32  igb_update_nvm_checksum_i350(struct e1000_hw *hw);
 static const u16 e1000_82580_rxpbs_table[] =

@@ -1584,14 +1580,31 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
  **/
 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
 {
-	u32 dtxswc = rd32(E1000_DTXSWC);
-
-	if (enable)
-		dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-	else
-		dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
-	wr32(E1000_DTXSWC, dtxswc);
+	u32 dtxswc;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		dtxswc = rd32(E1000_DTXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_DTXSWC, dtxswc);
+		break;
+	case e1000_i350:
+		dtxswc = rd32(E1000_TXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_TXSWC, dtxswc);
+		break;
+	default:
+		/* Currently no other hardware supports loopback */
+		break;
+	}
 }

 /**

@@ -1820,7 +1833,8 @@ u16 igb_rxpbs_adjust_82580(u32 data)
  * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
  * and then verifies that the sum of the EEPROM is equal to 0xBABA.
  **/
-s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+						 u16 offset)
 {
 	s32 ret_val = 0;
 	u16 checksum = 0;

@@ -1855,7 +1869,7 @@ s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
  * up to the checksum. Then calculates the EEPROM checksum and writes the
  * value to the EEPROM.
  **/
-s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
 {
 	s32 ret_val;
 	u16 checksum = 0;
drivers/net/ethernet/intel/igb/e1000_defines.h

@@ -85,6 +85,7 @@
 #define E1000_RXD_STAT_TCPCS    0x20      /* TCP xsum calculated */
 #define E1000_RXD_STAT_TS       0x10000   /* Pkt was time stamped */
+#define E1000_RXDEXT_STATERR_LB    0x00040000
 #define E1000_RXDEXT_STATERR_CE    0x01000000
 #define E1000_RXDEXT_STATERR_SE    0x02000000
 #define E1000_RXDEXT_STATERR_SEQ   0x04000000
drivers/net/ethernet/intel/igb/e1000_regs.h

@@ -318,6 +318,7 @@
 #define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
 #define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
 #define E1000_IOVTCL    0x05BBC /* IOV Control Register */
+#define E1000_TXSWC     0x05ACC /* Tx Switch Control */
 /* These act per VF so an array friendly macro is used */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
drivers/net/ethernet/intel/igb/igb.h

@@ -245,6 +245,7 @@ struct igb_ring {
 enum e1000_ring_flags_t {
 	IGB_RING_FLAG_RX_SCTP_CSUM,
+	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGB_RING_FLAG_TX_CTX_IDX,
 	IGB_RING_FLAG_TX_DETECT_HANG
 };
drivers/net/ethernet/intel/igb/igb_main.c

@@ -57,8 +57,8 @@
 #include "igb.h"

 #define MAJ 3
-#define MIN 0
-#define BUILD 6
+#define MIN 2
+#define BUILD 10
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
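The DRV_VERSION macro above relies on two-level stringification so that MAJ, MIN and BUILD are expanded to their numeric values before being turned into string literals. A minimal userspace sketch of the same technique, with xstr()/str() as illustrative stand-ins for the kernel's __stringify():

/* Sketch only: mimics how DRV_VERSION is assembled from MAJ/MIN/BUILD. */
#include <stdio.h>

#define MAJ 3
#define MIN 2
#define BUILD 10

#define str(x)  #x
#define xstr(x) str(x)          /* extra level forces macro expansion first */
#define DRV_VERSION xstr(MAJ) "." xstr(MIN) "." xstr(BUILD) "-k"

int main(void)
{
        puts(DRV_VERSION);      /* prints 3.2.10-k */
        return 0;
}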
@@ -563,7 +563,7 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
 	 * the lowest register is SYSTIMR instead of SYSTIML. However we never
 	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
 	 */
-	if (hw->mac.type == e1000_82580) {
+	if (hw->mac.type >= e1000_82580) {
 		stamp = rd32(E1000_SYSTIMR) >> 8;
 		shift = IGB_82580_TSYNC_SHIFT;
 	}
@@ -735,6 +735,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		/* set flag indicating ring supports SCTP checksum offload */
 		if (adapter->hw.mac.type >= e1000_82576)
 			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+		/* On i350, loopback VLAN packets have the tag byte-swapped. */
+		if (adapter->hw.mac.type == e1000_i350)
+			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
 		adapter->rx_ring[i] = ring;
 	}

 	/* Restore the adapter's original node */
@@ -1262,7 +1267,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
 			goto request_done;
 		/* fall back to MSI */
 		igb_clear_interrupt_scheme(adapter);
-		if (!pci_enable_msi(adapter->pdev))
+		if (!pci_enable_msi(pdev))
 			adapter->flags |= IGB_FLAG_HAS_MSI;
 		igb_free_all_tx_resources(adapter);
 		igb_free_all_rx_resources(adapter);
@@ -1284,12 +1289,12 @@ static int igb_request_irq(struct igb_adapter *adapter)
 		}
 		igb_setup_all_tx_resources(adapter);
 		igb_setup_all_rx_resources(adapter);
-	} else {
-		igb_assign_vector(adapter->q_vector[0], 0);
 	}
+
+	igb_assign_vector(adapter->q_vector[0], 0);
+
 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
-		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
+		err = request_irq(pdev->irq, igb_intr_msi, 0,
 				  netdev->name, adapter);
 		if (!err)
 			goto request_done;
@@ -1299,11 +1304,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
 	}

-	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
+	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
 			  netdev->name, adapter);

 	if (err)
-		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
+		dev_err(&pdev->dev, "Error %d getting interrupt\n",
 			err);

 request_done:
@@ -1317,11 +1322,9 @@ static void igb_free_irq(struct igb_adapter *adapter)
 		free_irq(adapter->msix_entries[vector++].vector, adapter);

-		for (i = 0; i < adapter->num_q_vectors; i++) {
-			struct igb_q_vector *q_vector = adapter->q_vector[i];
-			free_irq(adapter->msix_entries[vector++].vector,
-				 q_vector);
-		}
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			free_irq(adapter->msix_entries[vector++].vector,
+				 adapter->q_vector[i]);
 	} else {
 		free_irq(adapter->pdev->irq, adapter);
 	}
@@ -1369,7 +1372,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;

 	if (adapter->msix_entries) {
-		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
+		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
 		u32 regval = rd32(E1000_EIAC);
 		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
 		regval = rd32(E1000_EIAM);
@@ -1379,9 +1382,6 @@ static void igb_irq_enable(struct igb_adapter *adapter)
wr32
(
E1000_MBVFIMR
,
0xFF
);
ims
|=
E1000_IMS_VMMB
;
}
if
(
adapter
->
hw
.
mac
.
type
==
e1000_82580
)
ims
|=
E1000_IMS_DRSTA
;
wr32
(
E1000_IMS
,
ims
);
}
else
{
wr32
(
E1000_IMS
,
IMS_ENABLE_MASK
|
...
...
@@ -1523,10 +1523,9 @@ int igb_up(struct igb_adapter *adapter)
 	clear_bit(__IGB_DOWN, &adapter->state);

-	for (i = 0; i < adapter->num_q_vectors; i++) {
-		struct igb_q_vector *q_vector = adapter->q_vector[i];
-		napi_enable(&q_vector->napi);
-	}
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		napi_enable(&(adapter->q_vector[i]->napi));
+
 	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
 	else
@@ -1578,10 +1577,8 @@ void igb_down(struct igb_adapter *adapter)
 	wrfl();
 	msleep(10);

-	for (i = 0; i < adapter->num_q_vectors; i++) {
-		struct igb_q_vector *q_vector = adapter->q_vector[i];
-		napi_disable(&q_vector->napi);
-	}
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		napi_disable(&(adapter->q_vector[i]->napi));

 	igb_irq_disable(adapter);
@@ -2546,10 +2543,8 @@ static int igb_open(struct net_device *netdev)
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);

-	for (i = 0; i < adapter->num_q_vectors; i++) {
-		struct igb_q_vector *q_vector = adapter->q_vector[i];
-		napi_enable(&q_vector->napi);
-	}
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		napi_enable(&(adapter->q_vector[i]->napi));

 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
@@ -3119,7 +3114,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
srrctl
|=
(
PAGE_SIZE
/
2
)
>>
E1000_SRRCTL_BSIZEPKT_SHIFT
;
#endif
srrctl
|=
E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS
;
if
(
hw
->
mac
.
type
=
=
e1000_82580
)
if
(
hw
->
mac
.
type
>
=
e1000_82580
)
srrctl
|=
E1000_SRRCTL_TIMESTAMP
;
/* Only set Drop Enable if we are supporting multiple queues */
if
(
adapter
->
vfs_allocated_count
||
adapter
->
num_rx_queues
>
1
)
...
...
@@ -3769,10 +3764,8 @@ static void igb_watchdog_task(struct work_struct *work)
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	if (adapter->msix_entries) {
 		u32 eics = 0;
-		for (i = 0; i < adapter->num_q_vectors; i++) {
-			struct igb_q_vector *q_vector = adapter->q_vector[i];
-			eics |= q_vector->eims_value;
-		}
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			eics |= adapter->q_vector[i]->eims_value;
 		wr32(E1000_EICS, eics);
 	} else {
 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
@@ -4472,7 +4465,7 @@ static void igb_tx_timeout(struct net_device *netdev)
 	/* Do the reset outside of interrupt context */
 	adapter->tx_timeout_count++;

-	if (hw->mac.type == e1000_82580)
+	if (hw->mac.type >= e1000_82580)
 		hw->dev_spec._82575.global_device_reset = true;

 	schedule_work(&adapter->reset_task);
@@ -4778,12 +4771,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}

-	if (adapter->vfs_allocated_count)
-		wr32(E1000_IMS, E1000_IMS_LSC |
-				E1000_IMS_VMMB |
-				E1000_IMS_DOUTSYNC);
-	else
-		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
 	wr32(E1000_EIMS, adapter->eims_other);

 	return IRQ_HANDLED;
@@ -5590,7 +5577,7 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
 	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
 	 * 24 to match clock shift we setup earlier.
 	 */
-	if (adapter->hw.mac.type == e1000_82580)
+	if (adapter->hw.mac.type >= e1000_82580)
 		regval <<= IGB_82580_TSYNC_SHIFT;

 	ns = timecounter_cyc2time(&adapter->clock, regval);
@@ -5882,6 +5869,23 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
 	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }

+static void igb_rx_vlan(struct igb_ring *ring,
+			union e1000_adv_rx_desc *rx_desc,
+			struct sk_buff *skb)
+{
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+		u16 vid;
+		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
+			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+		else
+			vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+		__vlan_hwaccel_put_tag(skb, vid);
+	}
+}
+
 static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
 {
 	/* HW will not DMA in data larger than the given buffer, even if it
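The new igb_rx_vlan() helper above encodes the comment added in igb_alloc_queues(): on i350, VLAN tags of looped-back packets arrive byte-swapped, so the tag is read as big-endian (be16_to_cpu) instead of the usual little-endian (le16_to_cpu). A standalone sketch of the effect on a little-endian host, using an illustrative swab16() helper and VLAN ID 100:

/* Sketch only: why the RX_LB_VLAN_BSWAP flag selects be16_to_cpu(). */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint16_t vid = 100;                 /* tag as seen on the normal path */
        uint16_t looped = swab16(vid);      /* same tag after i350 loopback   */

        printf("read as little-endian: %u\n", looped);          /* 25600, wrong */
        printf("read as big-endian:    %u\n", swab16(looped));  /* 100, correct */
        return 0;
}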
@@ -5978,12 +5982,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 		igb_rx_hwtstamp(q_vector, rx_desc, skb);
 		igb_rx_hash(rx_ring, rx_desc, skb);
 		igb_rx_checksum(rx_ring, rx_desc, skb);
-
-		if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
-			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
-
-			__vlan_hwaccel_put_tag(skb, vid);
-		}
+		igb_rx_vlan(rx_ring, rx_desc, skb);

 		total_bytes += skb->len;
 		total_packets++;
@@ -6285,7 +6284,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
 		 * timestamped, so enable timestamping in all packets as
 		 * long as one rx filter was configured.
 		 */
-		if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
+		if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
 			tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
 			tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
 		}
@@ -6671,18 +6670,15 @@ static void igb_netpoll(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	struct igb_q_vector *q_vector;
 	int i;

-	if (!adapter->msix_entries) {
-		struct igb_q_vector *q_vector = adapter->q_vector[0];
-		igb_irq_disable(adapter);
-		napi_schedule(&q_vector->napi);
-		return;
-	}
-
 	for (i = 0; i < adapter->num_q_vectors; i++) {
-		struct igb_q_vector *q_vector = adapter->q_vector[i];
-		wr32(E1000_EIMC, q_vector->eims_value);
+		q_vector = adapter->q_vector[i];
+		if (adapter->msix_entries)
+			wr32(E1000_EIMC, q_vector->eims_value);
+		else
+			igb_irq_disable(adapter);
 		napi_schedule(&q_vector->napi);
 	}
 }
drivers/net/ethernet/intel/ixgbe/ixgbe.h

@@ -116,6 +116,8 @@
 #define MAX_EMULATION_MAC_ADDRS         16
 #define IXGBE_MAX_PF_MACVLANS           15
 #define VMDQ_P(p)   ((p) + adapter->num_vfs)
+#define IXGBE_82599_VF_DEVICE_ID        0x10ED
+#define IXGBE_X540_VF_DEVICE_ID         0x1515

 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];

@@ -512,6 +514,8 @@ struct ixgbe_adapter {
 	struct hlist_head fdir_filter_list;
 	union ixgbe_atr_input fdir_mask;
 	int fdir_filter_count;
+	u32 timer_event_accumulator;
+	u32 vferr_refcount;
 };

 struct ixgbe_fdir_filter {
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c

@@ -318,7 +318,15 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 		.selector = DCB_APP_IDTYPE_ETHTYPE,
 		.protocol = ETH_P_FCOE,
 	};
-	u8 up = dcb_getapp(netdev, &app);
+	u8 up;
+
+	/* In IEEE mode, use the IEEE Ethertype selector value */
+	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
+		app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+		up = dcb_ieee_getapp_mask(netdev, &app);
+	} else {
+		up = dcb_getapp(netdev, &app);
+	}
 #endif

 	/* Fail command if not in CEE mode */

@@ -331,7 +339,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 		return DCB_NO_HW_CHG;

 #ifdef IXGBE_FCOE
-	if (up && (up != (1 << adapter->fcoe.up)))
+	if (up && !(up & (1 << adapter->fcoe.up)))
 		adapter->dcb_set_bitmap |= BIT_APP_UPCHG;

 	/*
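With the IEEE selector, dcb_ieee_getapp_mask() returns a bitmap of user priorities rather than a single priority value, which is why the test in the second hunk changes from an equality comparison to a mask test. A small sketch with illustrative values (FCoE currently on priority 3, application advertised on priorities 0 and 3):

/* Sketch only: equality check vs. mask check on a priority bitmap. */
#include <stdio.h>

int main(void)
{
        unsigned char fcoe_up = 3;
        unsigned char up = (1 << 3) | (1 << 0);

        /* old test: reports an UP change even though priority 3 is still set */
        printf("old: %d\n", up && (up != (1 << fcoe_up)));   /* 1 */
        /* new test: reports a change only if priority 3 were dropped */
        printf("new: %d\n", up && !(up & (1 << fcoe_up)));   /* 0 */
        return 0;
}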
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -113,6 +113,8 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
 	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
 	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
+	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
+	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
 	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
 	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
 #endif /* IXGBE_FCOE */
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c

@@ -145,6 +145,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
 	dma_addr_t addr = 0;
 	struct pci_pool *pool;
+	unsigned int cpu;

 	if (!netdev || !sgl)
 		return 0;

@@ -182,7 +183,8 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	}

 	/* alloc the udl from per cpu ddp pool */
-	pool = *per_cpu_ptr(fcoe->pool, get_cpu());
+	cpu = get_cpu();
+	pool = *per_cpu_ptr(fcoe->pool, cpu);
 	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
 	if (!ddp->udl) {
 		e_err(drv, "failed allocated ddp context\n");

@@ -199,9 +201,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	while (len) {
 		/* max number of buffers allowed in one DDP context */
 		if (j >= IXGBE_BUFFCNT_MAX) {
-			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
-			      "not enough descriptors\n",
-			      xid, i, j, dmacount, (u64)addr);
+			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
 			goto out_noddp_free;
 		}

@@ -241,12 +241,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
 	 */
 	if (lastsize == bufflen) {
 		if (j >= IXGBE_BUFFCNT_MAX) {
-			printk_once("Will NOT use DDP since there are not "
-				    "enough user buffers. We need an extra "
-				    "buffer because lastsize is bufflen. "
-				    "xid=%x:%d,%d,%d:addr=%llx\n",
-				    xid, i, j, dmacount, (u64)addr);
-
+			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
 			goto out_noddp_free;
 		}

@@ -600,6 +595,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+	unsigned int cpu;

 	if (!fcoe->pool) {
 		spin_lock_init(&fcoe->lock);

@@ -627,6 +623,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			e_err(drv, "failed to map extra DDP buffer\n");
 			goto out_extra_ddp_buffer;
 		}
+
+		/* Alloc per cpu mem to count the ddp alloc failure number */
+		fcoe->pcpu_noddp = alloc_percpu(u64);
+		if (!fcoe->pcpu_noddp) {
+			e_err(drv, "failed to alloc noddp counter\n");
+			goto out_pcpu_noddp_alloc_fail;
+		}
+
+		fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
+		if (!fcoe->pcpu_noddp_ext_buff) {
+			e_err(drv, "failed to alloc noddp extra buff cnt\n");
+			goto out_pcpu_noddp_extra_buff_alloc_fail;
+		}
+
+		for_each_possible_cpu(cpu) {
+			*per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
+			*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
+		}
 	}

 	/* Enable L2 eth type filter for FCoE */

@@ -664,7 +678,13 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
 			IXGBE_FCRXCTRL_FCCRCBO |
 			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
 	return;
+out_pcpu_noddp_extra_buff_alloc_fail:
+	free_percpu(fcoe->pcpu_noddp);
+out_pcpu_noddp_alloc_fail:
+	dma_unmap_single(&adapter->pdev->dev,
+			 fcoe->extra_ddp_buffer_dma,
+			 IXGBE_FCBUFF_MIN,
+			 DMA_FROM_DEVICE);
 out_extra_ddp_buffer:
 	kfree(fcoe->extra_ddp_buffer);
 out_ddp_pools:

@@ -693,6 +713,8 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
 			  fcoe->extra_ddp_buffer_dma,
 			  IXGBE_FCBUFF_MIN,
 			  DMA_FROM_DEVICE);
+	free_percpu(fcoe->pcpu_noddp);
+	free_percpu(fcoe->pcpu_noddp_ext_buff);
 	kfree(fcoe->extra_ddp_buffer);
 	ixgbe_fcoe_ddp_pools_free(fcoe);
 }
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h

@@ -73,6 +73,8 @@ struct ixgbe_fcoe {
 	unsigned char *extra_ddp_buffer;
 	dma_addr_t extra_ddp_buffer_dma;
 	unsigned long mode;
+	u64 __percpu *pcpu_noddp;
+	u64 __percpu *pcpu_noddp_ext_buff;
 #ifdef CONFIG_IXGBE_DCB
 	u8 up;
 #endif
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -5552,6 +5552,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
 	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
 	u64 bytes = 0, packets = 0;
+#ifdef IXGBE_FCOE
+	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	unsigned int cpu;
+	u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
+#endif /* IXGBE_FCOE */

 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5679,6 +5684,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+		/* Add up per cpu counters for total ddp aloc fail */
+		if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+			for_each_possible_cpu(cpu) {
+				fcoe_noddp_counts_sum +=
+					*per_cpu_ptr(fcoe->pcpu_noddp, cpu);
+				fcoe_noddp_ext_buff_counts_sum +=
+					*per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu);
+			}
+		}
+		hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
+		hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
 #endif /* IXGBE_FCOE */
 		break;
 	default:
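The noddp statistics added across ixgbe_fcoe.c, ixgbe_fcoe.h and the hunk above follow the usual per-CPU counter pattern: allocate with alloc_percpu(), bump only the local CPU's slot on the hot path, and fold all slots together with for_each_possible_cpu() when reporting. A condensed kernel-style sketch of that life cycle (names are illustrative, error handling trimmed):

/* Sketch only: the per-CPU counter pattern used for fcoe_noddp above. */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

static u64 __percpu *noddp_counter;

static int counter_init(void)
{
	noddp_counter = alloc_percpu(u64);	/* one u64 slot per CPU */
	return noddp_counter ? 0 : -ENOMEM;
}

static void counter_inc(void)			/* hot path: local CPU only */
{
	unsigned int cpu = get_cpu();

	*per_cpu_ptr(noddp_counter, cpu) += 1;
	put_cpu();
}

static u64 counter_read(void)			/* slow path: fold all CPUs */
{
	unsigned int cpu;
	u64 sum = 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(noddp_counter, cpu);
	return sum;
}

static void counter_exit(void)
{
	free_percpu(noddp_counter);
}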
@@ -6112,6 +6129,51 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
 }

+#ifdef CONFIG_PCI_IOV
+static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
+{
+	int vf;
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	u32 gpc;
+	u32 ciaa, ciad;
+
+	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
+	if (gpc) /* If incrementing then no need for the check below */
+		return;
+	/*
+	 * Check to see if a bad DMA write target from an errant or
+	 * malicious VF has caused a PCIe error.  If so then we can
+	 * issue a VFLR to the offending VF(s) and then resume without
+	 * requesting a full slot reset.
+	 */
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		ciaa = (vf << 16) | 0x80000000;
+		/* 32 bit read so align, we really want status at offset 6 */
+		ciaa |= PCI_COMMAND;
+		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+		ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599);
+		ciaa &= 0x7FFFFFFF;
+		/* disable debug mode asap after reading data */
+		IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+		/* Get the upper 16 bits which will be the PCI status reg */
+		ciad >>= 16;
+		if (ciad & PCI_STATUS_REC_MASTER_ABORT) {
+			netdev_err(netdev, "VF %d Hung DMA\n", vf);
+			/* Issue VFLR */
+			ciaa = (vf << 16) | 0x80000000;
+			ciaa |= 0xA8;
+			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+			ciad = 0x00008000;  /* VFLR */
+			IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad);
+			ciaa &= 0x7FFFFFFF;
+			IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa);
+		}
+	}
+}
+#endif
 /**
  * ixgbe_service_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -6120,17 +6182,49 @@ static void ixgbe_service_timer(unsigned long data)
 {
 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
 	unsigned long next_event_offset;
+	bool ready = true;
+
+#ifdef CONFIG_PCI_IOV
+	ready = false;
+
+	/*
+	 * don't bother with SR-IOV VF DMA hang check if there are
+	 * no VFs or the link is down
+	 */
+	if (!adapter->num_vfs ||
+	    (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
+		ready = true;
+		goto normal_timer_service;
+	}
+
+	/* If we have VFs allocated then we must check for DMA hangs */
+	ixgbe_check_for_bad_vf(adapter);
+	next_event_offset = HZ / 50;
+	adapter->timer_event_accumulator++;
+
+	if (adapter->timer_event_accumulator >= 100) {
+		ready = true;
+		adapter->timer_event_accumulator = 0;
+	}
+
+	goto schedule_event;
+
+normal_timer_service:
+#endif
 	/* poll faster when waiting for link */
 	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
 		next_event_offset = HZ / 10;
 	else
 		next_event_offset = HZ * 2;

+#ifdef CONFIG_PCI_IOV
+schedule_event:
+#endif
 	/* Reset the timer */
 	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

-	ixgbe_service_event_schedule(adapter);
+	if (ready)
+		ixgbe_service_event_schedule(adapter);
 }

 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
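For scale: HZ / 50 is a 20 ms poll interval, so the DMA-hang check above runs at roughly 50 Hz while VFs are active, and the timer_event_accumulator threshold of 100 means the normal service work is still scheduled about every 2 seconds (100 * 20 ms) on that path.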
@@ -7717,6 +7811,91 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;

+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *bdev, *vfdev;
+	u32 dw0, dw1, dw2, dw3;
+	int vf, pos;
+	u16 req_id, pf_func;
+
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
+	    adapter->num_vfs == 0)
+		goto skip_bad_vf_detection;
+
+	bdev = pdev->bus->self;
+	while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+		bdev = bdev->bus->self;
+	if (!bdev)
+		goto skip_bad_vf_detection;
+
+	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+	if (!pos)
+		goto skip_bad_vf_detection;
+
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+
+	req_id = dw1 >> 16;
+	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
+	if (!(req_id & 0x0080))
+		goto skip_bad_vf_detection;
+
+	pf_func = req_id & 0x01;
+	if ((pf_func & 1) == (pdev->devfn & 1)) {
+		unsigned int device_id;
+
+		vf = (req_id & 0x7F) >> 1;
+		e_dev_err("VF %d has caused a PCIe error\n", vf);
+		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
+			  "%8.8x\tdw3: %8.8x\n",
+			  dw0, dw1, dw2, dw3);
+
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82599EB:
+			device_id = IXGBE_82599_VF_DEVICE_ID;
+			break;
+		case ixgbe_mac_X540:
+			device_id = IXGBE_X540_VF_DEVICE_ID;
+			break;
+		default:
+			device_id = 0;
+			break;
+		}
+
+		/* Find the pci device of the offending VF */
+		vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+		while (vfdev) {
+			if (vfdev->devfn == (req_id & 0xFF))
+				break;
+			vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+					       device_id, vfdev);
+		}
+		/*
+		 * There's a slim chance the VF could have been hot plugged,
+		 * so if it is no longer present we don't need to issue the
+		 * VFLR.  Just clean up the AER in that case.
+		 */
+		if (vfdev) {
+			e_dev_err("Issuing VFLR to VF %d\n", vf);
+			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+		}
+
+		pci_cleanup_aer_uncorrect_error_status(pdev);
+	}
+
+	/*
+	 * Even though the error may have occurred on the other port
+	 * we still need to increment the vf error reference count for
+	 * both ports because the I/O resume function will be called
+	 * for both of them.
+	 */
+	adapter->vferr_refcount++;
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV */
+
 	netif_device_detach(netdev);

 	if (state == pci_channel_io_perm_failure)
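The requester-ID handling above takes req_id = dw1 >> 16 from the AER TLP header log and compares its low byte against devfn when searching for the offending VF. Decoding a made-up value the same way the driver does:

/* Sketch only: decoding a hypothetical requester ID as in the hunk above. */
#include <stdio.h>

int main(void)
{
        unsigned int req_id = 0x0185;   /* hypothetical: low byte 0x85 */

        if (req_id & 0x0080) {          /* bit 7 set: request came from a VF */
                unsigned int pf_func = req_id & 0x01;   /* owning PF function: 1 */
                unsigned int vf = (req_id & 0x7F) >> 1; /* VF index: 2 */

                printf("PF function %u, VF %u\n", pf_func, vf);
        }
        return 0;
}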
@@ -7779,6 +7958,14 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;

+#ifdef CONFIG_PCI_IOV
+	if (adapter->vferr_refcount) {
+		e_info(drv, "Resuming after VF err\n");
+		adapter->vferr_refcount--;
+		return;
+	}
+
+#endif
 	if (netif_running(netdev))
 		ixgbe_up(adapter);
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h

@@ -2682,6 +2682,8 @@ struct ixgbe_hw_stats {
 	u64 fcoeptc;
 	u64 fcoedwrc;
 	u64 fcoedwtc;
+	u64 fcoe_noddp;
+	u64 fcoe_noddp_ext_buff;
 	u64 b2ospc;
 	u64 b2ogprc;
 	u64 o2bgptc;