Commit 11697cfc authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2019-06-28

This series contains a smorgasbord of updates to many of the Intel
drivers.

Gustavo A. R. Silva updates the ice and iavf drivers to use the
struct_size() helper where possible.
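
As a rough sketch of the pattern (names taken from the iavf hunks
below; struct_size() comes from <linux/overflow.h>):

    /* before: manual, overflow-prone size arithmetic */
    len = sizeof(struct virtchnl_ether_addr_list) +
          count * sizeof(struct virtchnl_ether_addr);

    /* after: overflow-checked helper for structs with trailing arrays */
    len = struct_size(veal, list, count);
    veal = kzalloc(len, GFP_KERNEL);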

Miguel increases the pause and refresh time for flow control in the
e1000e driver during reset for certain devices.

Dann Frazier fixes a potential NULL pointer dereference in the ixgbe
driver when using devices without IPsec support.

Colin Ian King fixes a potential overflow during a shift in the ixgbe
driver.  He also fixes a potential NULL pointer dereference in the iavf
driver by adding a check.
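
A sketch of the shift fix (see the ixgbe_ptp hunks below): the 32-bit
remainder is widened to u64 before the shift so the intermediate value
cannot overflow:

    /* rem holds a 32-bit nanosecond remainder; rem << cc->shift can
     * exceed 32 bits, so widen before shifting */
    clock_edge += div_u64(((u64)rem << cc->shift), cc->mult);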

Venkatesh Srinivas converts the e1000 driver to use dma_wmb() instead of
wmb() for doorbell writes to avoid SFENCEs in the transmit and receive
paths.
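
A minimal sketch of the doorbell pattern being changed (taken from the
e1000 receive-path hunks below); dma_wmb() orders the descriptor writes
against the tail update without the full SFENCE that wmb() implies on
x86:

    /* descriptor fields written above ... */
    dma_wmb();                               /* make descriptors visible to the device */
    writel(i, hw->hw_addr + rx_ring->rdt);   /* then ring the doorbell */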

Arjan updates the e1000e driver to improve boot time by over 100 msec by
reducing the usleep ranges during system startup.
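
The change repeated throughout the e1000e hunks below is simply a
tighter upper bound on each sleep, for example:

    /* before: could over-sleep by up to 10 ms per call */
    usleep_range(10000, 20000);
    /* after: same minimum, much tighter upper bound */
    usleep_range(10000, 11000);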

Artem updates the igb driver's ethtool register dump: he first prepares
the dump for future register additions, then adds the RR2DCDELAY
register.  When dealing with time-sensitive networks, this register
helps determine the latency from the device to the ring.

Alex fixes the ixgbevf driver to use the current cached link state,
rather than trying to re-check the value from the PF.

Harshitha adds support for MACVLAN offloads in i40e by using channels as
MACVLAN interfaces.
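
A rough sketch of the core idea (the full changes are in the i40e hunks
below): each offloaded MACVLAN is backed by its own channel, and a
channel is recognized as a MACVLAN offload by its attached forwarding
adapter:

    struct i40e_fwd_adapter {
            struct net_device *netdev;  /* the macvlan netdev */
            int bit_no;                 /* slot in the VSI's fwd_bitmask */
    };

    static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
    {
            return !!ch->fwd;           /* non-NULL fwd => macvlan-backed channel */
    }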

Detlev Casanova updates the e1000e driver to use delayed work instead of
timers to run the watchdog.
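
A rough sketch of the conversion (the full changes are in the e1000e
hunks below): the watchdog timer becomes a delayed work item on a
dedicated workqueue, so both the initial arming and the periodic
re-arming go through queue_delayed_work():

    INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
    /* re-arm from within the work function instead of mod_timer() */
    queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
                       round_jiffies(2 * HZ));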

Vitaly fixes an issue in e1000e where, after disconnecting and
reconnecting the physical cable, the NIC can enter a DMoff state.  This
state causes a mismatch in link and duplex, so the driver now checks the
PCIm function state and performs a PHY reset when the MAC is in DMoff to
resolve the issue.
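
A condensed sketch of the recovery logic (shown in full in the
e1000_watchdog_task hunk below): poll the STATUS register until the
PCIm function-state bit clears, then reset the PHY once the MAC has
left DMoff:

    pcim_state = er32(STATUS);
    while (pcim_state & E1000_STATUS_PCIM_STATE) {
            if (tries++ == dmoff_exit_timeout)
                    break;
            usleep_range(10000, 20000);
            pcim_state = er32(STATUS);
            /* MAC has exited DMoff: bring the PHY back with a reset */
            if (!(pcim_state & E1000_STATUS_PCIM_STATE))
                    e1000_phy_hw_reset(&adapter->hw);
    }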
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f072218c def4ec6d
......@@ -3019,7 +3019,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
dma_wmb();
tx_ring->next_to_use = i;
}
......@@ -4540,7 +4540,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
dma_wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
}
......@@ -4655,7 +4655,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
dma_wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
}
......
......@@ -680,7 +680,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
ctrl = er32(CTRL);
......
......@@ -959,7 +959,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(TCTL, tctl);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
/* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
......
......@@ -222,6 +222,9 @@
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
/* PCIm function state */
#define E1000_STATUS_PCIM_STATE 0x40000000
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
......
......@@ -186,12 +186,13 @@ struct e1000_phy_regs {
/* board specific private data structure */
struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
struct delayed_work watchdog_task;
struct workqueue_struct *e1000_workqueue;
const struct e1000_info *ei;
......
......@@ -1014,7 +1014,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
/* Test each interrupt */
for (i = 0; i < 10; i++) {
......@@ -1046,7 +1046,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
ew32(IMC, mask);
ew32(ICS, mask);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
......@@ -1064,7 +1064,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
ew32(IMS, mask);
ew32(ICS, mask);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
......@@ -1082,7 +1082,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
if (adapter->test_icr) {
*data = 5;
......@@ -1094,7 +1094,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
/* Unhook test interrupt handler */
free_irq(irq, netdev);
......@@ -1470,7 +1470,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
*/
ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
return 0;
}
......@@ -1584,7 +1584,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->phy.media_type == e1000_media_type_internal_serdes) {
ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
break;
}
/* Fall Through */
......
......@@ -271,7 +271,7 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
u16 count = 20;
do {
usleep_range(5000, 10000);
usleep_range(5000, 6000);
} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
msleep(30);
......@@ -405,7 +405,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
usleep_range(10000, 20000);
usleep_range(10000, 11000);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
......@@ -531,7 +531,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->id = 0;
while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
(i++ < 100)) {
usleep_range(1000, 2000);
usleep_range(1000, 1100);
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
return ret_val;
......@@ -1244,7 +1244,7 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
goto out;
}
usleep_range(10000, 20000);
usleep_range(10000, 11000);
}
e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
......@@ -1999,7 +1999,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
usleep_range(10000, 20000);
usleep_range(10000, 11000);
return blocked ? E1000_BLK_PHY_RESET : 0;
}
......@@ -2818,7 +2818,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
return 0;
/* Allow time for h/w to get to quiescent state after reset */
usleep_range(10000, 20000);
usleep_range(10000, 11000);
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
......@@ -2854,7 +2854,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_pch2lan) {
/* Ungate automatic PHY configuration on non-managed 82579 */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
usleep_range(10000, 20000);
usleep_range(10000, 11000);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
......@@ -3875,7 +3875,7 @@ static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
*/
if (!ret_val) {
nvm->ops.reload(hw);
usleep_range(10000, 20000);
usleep_range(10000, 11000);
}
out:
......@@ -4026,7 +4026,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
if (!ret_val) {
nvm->ops.reload(hw);
usleep_range(10000, 20000);
usleep_range(10000, 11000);
}
out:
......@@ -4650,7 +4650,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
/* Workaround for ICH8 bit corruption issue in FIFO memory */
if (hw->mac.type == e1000_ich8lan) {
......
......@@ -797,7 +797,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
* milliseconds even if the other end is doing it in SW).
*/
for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
usleep_range(10000, 20000);
usleep_range(10000, 11000);
status = er32(STATUS);
if (status & E1000_STATUS_LU)
break;
......
......@@ -1780,7 +1780,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
queue_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task, 1);
}
/* Reset on uncorrectable ECC error */
......@@ -1860,7 +1861,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
queue_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task, 1);
}
/* Reset on uncorrectable ECC error */
......@@ -1905,7 +1907,8 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
queue_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task, 1);
}
if (!test_bit(__E1000_DOWN, &adapter->state))
......@@ -3208,7 +3211,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
if (adapter->flags2 & FLAG2_DMA_BURST) {
/* set the writeback threshold (only takes effect if the RDTR
......@@ -4046,12 +4049,12 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_lpt:
case e1000_pch_spt:
case e1000_pch_cnp:
fc->refresh_time = 0x0400;
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
if (adapter->netdev->mtu <= ETH_DATA_LEN) {
fc->high_water = 0x05C20;
fc->low_water = 0x05048;
fc->pause_time = 0x0650;
break;
}
......@@ -4272,13 +4275,12 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
/* flush both disables and wait for them to finish */
e1e_flush();
usleep_range(10000, 20000);
usleep_range(10000, 11000);
e1000_irq_disable(adapter);
napi_synchronize(&adapter->napi);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
spin_lock(&adapter->stats64_lock);
......@@ -4310,7 +4312,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
might_sleep();
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
usleep_range(1000, 1100);
e1000e_down(adapter, true);
e1000e_up(adapter);
clear_bit(__E1000_RESETTING, &adapter->state);
......@@ -4707,7 +4709,7 @@ int e1000e_close(struct net_device *netdev)
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
usleep_range(10000, 20000);
usleep_range(10000, 11000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
......@@ -5150,31 +5152,18 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
}
}
/**
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
static void e1000_watchdog(struct timer_list *t)
{
struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
/* TODO: make this use queue_delayed_work() */
}
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
watchdog_task);
watchdog_task.work);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
struct e1000_ring *tx_ring = adapter->tx_ring;
u32 dmoff_exit_timeout = 100, tries = 0;
struct e1000_hw *hw = &adapter->hw;
u32 link, tctl;
u32 link, tctl, pcim_state;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
......@@ -5199,6 +5188,21 @@ static void e1000_watchdog_task(struct work_struct *work)
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
/* Checking if MAC is in DMoff state*/
pcim_state = er32(STATUS);
while (pcim_state & E1000_STATUS_PCIM_STATE) {
if (tries++ == dmoff_exit_timeout) {
e_dbg("Error in exiting dmoff\n");
break;
}
usleep_range(10000, 20000);
pcim_state = er32(STATUS);
/* Checking if MAC exited DMoff state */
if (!(pcim_state & E1000_STATUS_PCIM_STATE))
e1000_phy_hw_reset(&adapter->hw);
}
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
......@@ -5400,8 +5404,9 @@ static void e1000_watchdog_task(struct work_struct *work)
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + 2 * HZ));
queue_delayed_work(adapter->e1000_workqueue,
&adapter->watchdog_task,
round_jiffies(2 * HZ));
}
#define E1000_TX_FLAGS_CSUM 0x00000001
......@@ -6021,7 +6026,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
}
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
usleep_range(1000, 1100);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
adapter->max_frame_size = max_frame;
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
......@@ -6301,7 +6306,7 @@ static int e1000e_pm_freeze(struct device *dev)
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
usleep_range(10000, 20000);
usleep_range(10000, 11000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
......@@ -6716,7 +6721,7 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
int count = E1000_CHECK_RESET_COUNT;
while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
usleep_range(10000, 20000);
usleep_range(10000, 11000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
......@@ -7256,11 +7261,21 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
adapter->e1000_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
e1000e_driver_name);
if (!adapter->e1000_workqueue) {
err = -ENOMEM;
goto err_workqueue;
}
INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
queue_delayed_work(adapter->e1000_workqueue, &adapter->watchdog_task,
0);
timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
......@@ -7354,6 +7369,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_register:
flush_workqueue(adapter->e1000_workqueue);
destroy_workqueue(adapter->e1000_workqueue);
err_workqueue:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
......@@ -7400,15 +7418,17 @@ static void e1000_remove(struct pci_dev *pdev)
*/
if (!down)
set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
cancel_delayed_work(&adapter->watchdog_task);
flush_workqueue(adapter->e1000_workqueue);
destroy_workqueue(adapter->e1000_workqueue);
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
......
......@@ -392,7 +392,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
usleep_range(10000, 20000);
usleep_range(10000, 11000);
nvm->ops.release(hw);
}
......
......@@ -27,6 +27,7 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
......@@ -412,6 +413,11 @@ struct i40e_flex_pit {
u8 pit_index;
};
struct i40e_fwd_adapter {
struct net_device *netdev;
int bit_no;
};
struct i40e_channel {
struct list_head list;
bool initialized;
......@@ -426,11 +432,25 @@ struct i40e_channel {
struct i40e_aqc_vsi_properties_data info;
u64 max_tx_rate;
struct i40e_fwd_adapter *fwd;
/* track this channel belongs to which VSI */
struct i40e_vsi *parent_vsi;
};
static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
{
return !!ch->fwd;
}
static inline u8 *i40e_channel_mac(struct i40e_channel *ch)
{
if (i40e_is_channel_macvlan(ch))
return ch->fwd->netdev->dev_addr;
else
return NULL;
}
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
......@@ -813,6 +833,13 @@ struct i40e_vsi {
struct list_head ch_list;
u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
/* macvlan fields */
#define I40E_MAX_MACVLANS 128 /* Max HW vectors - 1 on FVL */
#define I40E_MIN_MACVLAN_VECTORS 2 /* Min vectors to enable macvlans */
DECLARE_BITMAP(fwd_bitmask, I40E_MAX_MACVLANS);
struct list_head macvlan_list;
int macvlan_cnt;
void *priv; /* client driver data reference. */
/* VSI specific handlers */
......
......@@ -44,8 +44,12 @@ struct iavf_virt_mem {
#define iavf_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
#define iavf_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
#define iavf_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__)
extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
__printf(3, 4);
#define iavf_debug(h, m, s, ...) \
do { \
if (((m) & (h)->debug_mask)) \
pr_info("iavf %02x:%02x.%x " s, \
(h)->bus.bus_id, (h)->bus.device, \
(h)->bus.func, ##__VA_ARGS__); \
} while (0)
#endif /* _IAVF_OSDEP_H_ */
......@@ -1296,7 +1296,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
......@@ -1308,6 +1308,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
if (!rx_buffer)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
......@@ -1362,7 +1363,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
struct iavf_rx_buffer *rx_buffer,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
void *va;
#if (PAGE_SIZE < 8192)
unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
......@@ -1374,6 +1375,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
if (!rx_buffer)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
prefetch(va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
......
......@@ -242,7 +242,8 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
int i, len, max_frame = IAVF_MAX_RXBUFFER;
int i, max_frame = IAVF_MAX_RXBUFFER;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
......@@ -251,8 +252,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
return;
}
adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
len = sizeof(struct virtchnl_vsi_queue_config_info) +
(sizeof(struct virtchnl_queue_pair_info) * pairs);
len = struct_size(vqci, qpair, pairs);
vqci = kzalloc(len, GFP_KERNEL);
if (!vqci)
return;
......@@ -351,8 +351,9 @@ void iavf_map_queues(struct iavf_adapter *adapter)
{
struct virtchnl_irq_map_info *vimi;
struct virtchnl_vector_map *vecmap;
int v_idx, q_vectors, len;
struct iavf_q_vector *q_vector;
int v_idx, q_vectors;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
......@@ -364,9 +365,7 @@ void iavf_map_queues(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
len = sizeof(struct virtchnl_irq_map_info) +
(adapter->num_msix_vectors *
sizeof(struct virtchnl_vector_map));
len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
vimi = kzalloc(len, GFP_KERNEL);
if (!vimi)
return;
......@@ -433,9 +432,10 @@ int iavf_request_queues(struct iavf_adapter *adapter, int num)
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
struct virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
struct iavf_mac_filter *f;
int i = 0, count = 0;
bool more = false;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
......@@ -457,15 +457,13 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
}
adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct virtchnl_ether_addr));
len = struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr);
len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct virtchnl_ether_addr));
len = struct_size(veal, list, count);
more = true;
}
......@@ -505,8 +503,9 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
struct virtchnl_ether_addr_list *veal;
struct iavf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
int i = 0, count = 0;
bool more = false;
size_t len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
......@@ -528,15 +527,13 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
}
adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct virtchnl_ether_addr));
len = struct_size(veal, list, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (IAVF_MAX_AQ_BUF_SIZE -
sizeof(struct virtchnl_ether_addr_list)) /
sizeof(struct virtchnl_ether_addr);
len = sizeof(struct virtchnl_ether_addr_list) +
(count * sizeof(struct virtchnl_ether_addr));
len = struct_size(veal, list, count);
more = true;
}
veal = kzalloc(len, GFP_ATOMIC);
......@@ -973,7 +970,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
void iavf_enable_channels(struct iavf_adapter *adapter)
{
struct virtchnl_tc_info *vti = NULL;
u16 len;
size_t len;
int i;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
......@@ -983,9 +980,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
return;
}
len = ((adapter->num_tc - 1) * sizeof(struct virtchnl_channel_info)) +
sizeof(struct virtchnl_tc_info);
len = struct_size(vti, list, adapter->num_tc - 1);
vti = kzalloc(len, GFP_KERNEL);
if (!vti)
return;
......
......@@ -683,10 +683,10 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 i, num_groups_added = 0;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 buf_size;
size_t buf_size;
u32 teid;
buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
buf_size = struct_size(buf, generic, num_nodes - 1);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
return ICE_ERR_NO_MEMORY;
......
......@@ -409,6 +409,8 @@ do { \
#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40))
#define E1000_I210_RR2DCDELAY 0x5BF4
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
......
......@@ -448,7 +448,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
static int igb_get_regs_len(struct net_device *netdev)
{
#define IGB_REGS_LEN 739
#define IGB_REGS_LEN 740
return IGB_REGS_LEN * sizeof(u32);
}
......@@ -675,41 +675,44 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[554] = adapter->stats.b2ogprc;
}
if (hw->mac.type != e1000_82576)
return;
for (i = 0; i < 12; i++)
regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
for (i = 0; i < 4; i++)
regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
for (i = 0; i < 12; i++)
regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
for (i = 0; i < 12; i++)
regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
for (i = 0; i < 12; i++)
regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
for (i = 0; i < 12; i++)
regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
for (i = 0; i < 12; i++)
regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
if (hw->mac.type == e1000_82576) {
for (i = 0; i < 12; i++)
regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
for (i = 0; i < 4; i++)
regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
for (i = 0; i < 12; i++)
regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
for (i = 0; i < 12; i++)
regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
for (i = 0; i < 12; i++)
regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
for (i = 0; i < 12; i++)
regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
for (i = 0; i < 12; i++)
regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
for (i = 0; i < 12; i++)
regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
for (i = 0; i < 12; i++)
regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
}
if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211)
regs_buff[739] = rd32(E1000_I210_RR2DCDELAY);
}
static int igb_get_eeprom_len(struct net_device *netdev)
......
......@@ -842,6 +842,9 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
struct ixgbe_ipsec *ipsec = adapter->ipsec;
int i;
if (!ipsec)
return;
/* search rx sa table */
for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
if (!ipsec->rx_tbl[i].used)
......
......@@ -205,11 +205,8 @@ static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter)
*/
rem = (NS_PER_SEC - rem);
/* Adjust the clock edge to align with the next full second. This
* assumes that the cycle counter shift is small enough to avoid
* overflowing when shifting the remainder.
*/
clock_edge += div_u64((rem << cc->shift), cc->mult);
/* Adjust the clock edge to align with the next full second. */
clock_edge += div_u64(((u64)rem << cc->shift), cc->mult);
trgttiml = (u32)clock_edge;
trgttimh = (u32)(clock_edge >> 32);
......@@ -291,11 +288,8 @@ static void ixgbe_ptp_setup_sdp_X550(struct ixgbe_adapter *adapter)
*/
rem = (NS_PER_SEC - rem);
/* Adjust the clock edge to align with the next full second. This
* assumes that the cycle counter shift is small enough to avoid
* overflowing when shifting the remainder.
*/
clock_edge += div_u64((rem << cc->shift), cc->mult);
/* Adjust the clock edge to align with the next full second. */
clock_edge += div_u64(((u64)rem << cc->shift), cc->mult);
/* X550 hardware stores the time in 32bits of 'billions of cycles' and
* 32bits of 'cycles'. There's no guarantee that cycles represents
......
......@@ -85,22 +85,16 @@ static int ixgbevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = 0;
bool link_up;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = -1;
hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
if (adapter->link_up) {
__u32 speed = SPEED_10000;
switch (link_speed) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
speed = SPEED_10000;
break;
......