Commit a77f9a28 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-09-04

This series contains updates to i40e, i40evf, ixgbe and ixgbevf.

Catherine adds dual speed module support to i40e and updates i40e to allow
the user to change link settings while the link is down.

Serey renames i40e_ndo_set_vf_spoofck() to i40e_ndo_set_vf_spoofchk()
to be consistent with what is defined in netdev and removes an
unnecessary variable assignment.

Jesse makes a malicious driver detection warning only print if extended
driver string is enabled for i40e.  He also fixes a panic under traffic
load when resetting, or whenever there was a Tx timeout, caused by
enabling the Tx queue too early.

Anjali fixes an issue when PF reset fails, where we were trying to restart
the admin queue, which has not been set up at that point.  This resolves an
occasional kernel panic when a PF reset fails for some reason.

Ethan Zhao replaces the driver-local i40e_vfs_are_assigned() with the
kernel's generic pci_vfs_assigned() in i40e.

Alex cleans up the FDB handling for ixgbe.  This change makes the
behavior for FDB handling consistent between the SR-IOV and
non-SR-IOV cases.  The main change is that we perform bounds checking on
the number of SR-IOV addresses regardless of whether SR-IOV is enabled,
since we can only support a certain number of addresses in the hardware.

Emil extends the pending Tx work check to the VF interfaces, where the
driver initiates a reset of the interface on link loss with pending Tx
work in order to clear the rings.  He also introduces a delay of at
least 500 usecs for 82599 VFs to make sure the VFLINKS value is correct,
since this bit tends to flap when a DA or SFP+ cable is disconnected.

Jacob adds code comments in ixgbe to make it more obvious that we are
resetting features because we do not have MSI-X enabled and cannot use
the previous settings.  He also resolves a kernel NULL pointer
dereference by limiting the combined total of MACVLAN and SR-IOV VFs,
since the hardware has a limited number of pools available (64).
Previously, no checks limited the number of accelerated MACVLAN devices
based on the number of pools, which was acceptable because an existing
limit kept them well below the number of available pools.  However,
SR-IOV uses the very same pools, so we need to ensure that the total
number of pools in use does not exceed the number available in the
hardware.

v2:
 - clean up code comment in patch 5 by replacing "an" with "auto
   negotiation" based on feedback from Sergei Shtylyov
 - removed unnecessary parentheses around function call in patch 8
   based on feedback from Sergei Shtylyov
====================
parents c2b32e58 aac2f1bf
@@ -145,6 +145,7 @@ enum i40e_state_t {
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
+__I40E_RESET_FAILED,
};
enum i40e_interrupt_policy {
...
@@ -752,6 +752,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
switch (hw->phy.link_info.phy_type) {
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
+case I40E_PHY_TYPE_1000BASE_SX:
+case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
media = I40E_MEDIA_TYPE_FIBER;
...
@@ -313,7 +313,10 @@ static int i40e_get_settings(struct net_device *netdev,
break;
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
+case I40E_PHY_TYPE_1000BASE_SX:
+case I40E_PHY_TYPE_1000BASE_LX:
ecmd->supported = SUPPORTED_10000baseT_Full;
+ecmd->supported |= SUPPORTED_1000baseT_Full;
break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
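To illustrate what the dual speed module support above changes, here is a minimal user-space sketch of the reported mode mask; the SUPPORTED_* bit positions mirror linux/ethtool.h, and this is illustrative code, not driver code:

#include <stdio.h>
#include <stdint.h>

#define SUPPORTED_1000baseT_Full   (1 << 5)   /* bit positions as in linux/ethtool.h */
#define SUPPORTED_10000baseT_Full  (1 << 12)

int main(void)
{
    /* with a dual speed (1G/10G) module, both speed bits are now reported */
    uint32_t supported = SUPPORTED_10000baseT_Full;

    supported |= SUPPORTED_1000baseT_Full;
    printf("supported link modes mask: 0x%04x\n", supported);
    return 0;
}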
@@ -352,7 +355,8 @@ static int i40e_get_settings(struct net_device *netdev,
break;
default:
/* if we got here and link is up something bad is afoot */
-WARN_ON(link_up);
+netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+hw_link_info->phy_type);
}
no_valid_phy_type:
@@ -462,7 +466,8 @@ static int i40e_set_settings(struct net_device *netdev,
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
-hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE)
+hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
/* get our own copy of the bits to check against */
@@ -493,11 +498,10 @@ static int i40e_set_settings(struct net_device *netdev,
if (status)
return -EAGAIN;
-/* Copy link_speed and abilities to config in case they are not
+/* Copy abilities to config in case autoneg is not
 * set below
 */
memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
-config.link_speed = abilities.link_speed;
config.abilities = abilities.abilities;
/* Check autoneg */
@@ -534,42 +538,38 @@ static int i40e_set_settings(struct net_device *netdev,
return -EINVAL;
if (advertise & ADVERTISED_100baseT_Full)
-if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) {
config.link_speed |= I40E_LINK_SPEED_100MB;
-change = true;
-}
if (advertise & ADVERTISED_1000baseT_Full ||
advertise & ADVERTISED_1000baseKX_Full)
-if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) {
config.link_speed |= I40E_LINK_SPEED_1GB;
-change = true;
-}
if (advertise & ADVERTISED_10000baseT_Full ||
advertise & ADVERTISED_10000baseKX4_Full ||
advertise & ADVERTISED_10000baseKR_Full)
-if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) {
config.link_speed |= I40E_LINK_SPEED_10GB;
-change = true;
-}
if (advertise & ADVERTISED_40000baseKR4_Full ||
advertise & ADVERTISED_40000baseCR4_Full ||
advertise & ADVERTISED_40000baseSR4_Full ||
advertise & ADVERTISED_40000baseLR4_Full)
-if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) {
config.link_speed |= I40E_LINK_SPEED_40GB;
-change = true;
-}
-if (change) {
+if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
-/* If link is up set link and an so changes take effect */
-if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
+/* set link and auto negotiation so changes take effect */
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+/* If link is up put link down */
+if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
+/* Tell the OS link is going down, the link will go
+ * back up when fw says it is ready asynchronously
+ */
+netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+netif_carrier_off(netdev);
+netif_tx_stop_all_queues(netdev);
+}
/* make the aq call */
status = i40e_aq_set_phy_config(hw, &config, NULL);
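A condensed sketch of the new decision logic above, with illustrative stand-ins for the I40E_LINK_SPEED_* bits: the PHY config is now pushed whenever the requested speed set differs from the current abilities, not only when an autoneg change was detected, which is what allows changing settings while the link is down:

#include <stdbool.h>
#include <stdio.h>

#define LINK_SPEED_1GB   0x04   /* illustrative stand-ins for I40E_LINK_SPEED_* */
#define LINK_SPEED_10GB  0x08

int main(void)
{
    unsigned abilities_speed = LINK_SPEED_10GB;                 /* current PHY abilities */
    unsigned config_speed = LINK_SPEED_10GB | LINK_SPEED_1GB;   /* user now advertises 1G too */
    bool change = false;                                        /* no autoneg/duplex change */

    if (change || (abilities_speed != config_speed))
        printf("would call i40e_aq_set_phy_config()\n");
    return 0;
}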
@@ -686,6 +686,13 @@ static int i40e_set_pauseparam(struct net_device *netdev,
else
return -EINVAL;
+/* Tell the OS link is going down, the link will go back up when fw
+ * says it is ready asynchronously
+ */
+netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+netif_carrier_off(netdev);
+netif_tx_stop_all_queues(netdev);
/* Set the fc mode and only restart an if link is up*/
status = i40e_set_fc(hw, &aq_failures, link_up);
...
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -5289,7 +5289,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
**/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
-if (!vsi)
+if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
return;
switch (vsi->type) {
@@ -5568,6 +5568,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u32 oldval;
u32 val;
+/* Do not run clean AQ when PF reset fails */
+if (test_bit(__I40E_RESET_FAILED, &pf->state))
+return;
/* check for error indications */
val = rd32(&pf->hw, pf->hw.aq.arq.len);
oldval = val;
@@ -5973,19 +5977,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
ret = i40e_pf_reset(hw);
if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
-goto end_core_reset;
+set_bit(__I40E_RESET_FAILED, &pf->state);
+goto clear_recovery;
}
pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state))
-goto end_core_reset;
+goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
-goto end_core_reset;
+goto clear_recovery;
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -6103,6 +6108,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_send_version(pf);
end_core_reset:
+clear_bit(__I40E_RESET_FAILED, &pf->state);
+clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
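A minimal user-space sketch of the unwind order introduced above, using stand-ins for the __I40E_* state bits: a failed PF reset records the failure and jumps straight to the recovery-clearing label, while the success path falls through end_core_reset and clears the flag:

#include <stdio.h>

#define RESET_FAILED            0x1   /* stand-ins for the __I40E_* state bits */
#define RESET_RECOVERY_PENDING  0x2

static unsigned pf_state = RESET_RECOVERY_PENDING;

static void reset_and_rebuild(int pf_reset_ok)
{
    if (!pf_reset_ok) {
        pf_state |= RESET_FAILED;   /* remember the failure for other subtasks */
        goto clear_recovery;        /* skip the rebuild and the AQ-touching path */
    }
    /* ... rebuild AdminQ, HMC and the HW switch ... */
    pf_state &= ~RESET_FAILED;      /* "end_core_reset:" - success clears the flag */
clear_recovery:
    pf_state &= ~RESET_RECOVERY_PENDING;
}

int main(void)
{
    reset_and_rebuild(0);
    printf("state after failed PF reset: 0x%x\n", pf_state);   /* prints 0x1 */
    return 0;
}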
@@ -6148,8 +6155,8 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_TX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
-dev_info(&pf->pdev->dev,
-"Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+if (netif_msg_tx_err(pf))
+dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
event, queue, pf_num, vf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -6162,8 +6169,8 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_RX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
-dev_info(&pf->pdev->dev,
-"Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
+if (netif_msg_rx_err(pf))
+dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
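The netif_msg_tx_err()/netif_msg_rx_err() tests above gate these messages on the adapter's message level, which is the mask that ethtool's msglvl option adjusts. A toy sketch of that gating; the NETIF_MSG_* values match linux/netdevice.h:

#include <stdio.h>

#define NETIF_MSG_RX_ERR  0x0040   /* values as in linux/netdevice.h */
#define NETIF_MSG_TX_ERR  0x0080

struct toy_pf {
    unsigned msg_enable;
};

int main(void)
{
    struct toy_pf pf = { .msg_enable = NETIF_MSG_TX_ERR };

    if (pf.msg_enable & NETIF_MSG_TX_ERR)      /* what netif_msg_tx_err(&pf) tests */
        printf("would log the TX MDD event\n");
    if (!(pf.msg_enable & NETIF_MSG_RX_ERR))
        printf("the RX MDD message stays quiet\n");
    return 0;
}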
@@ -6173,17 +6180,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
-dev_info(&pf->pdev->dev,
-"MDD TX event is for this function 0x%08x, requesting PF reset.\n",
-reg);
+dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
-dev_info(&pf->pdev->dev,
-"MDD RX event is for this function 0x%08x, requesting PF reset.\n",
-reg);
+dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
pf_mdd_detected = true;
}
/* Queue belongs to the PF, initiate a reset */
@@ -6200,14 +6203,16 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
vf->num_mdd_events++;
-dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+i);
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
vf->num_mdd_events++;
-dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+i);
}
if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
@@ -7469,7 +7474,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
-.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck,
+.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
...
@@ -707,35 +707,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw);
}
-/**
- * i40e_vfs_are_assigned
- * @pf: pointer to the pf structure
- *
- * Determine if any VFs are assigned to VMs
- **/
-static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
-{
-struct pci_dev *pdev = pf->pdev;
-struct pci_dev *vfdev;
-/* loop through all the VFs to see if we own any that are assigned */
-vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
-while (vfdev) {
-/* if we don't own it we don't care */
-if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
-/* if it is assigned we cannot release it */
-if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
-return true;
-}
-vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
-I40E_DEV_ID_VF,
-vfdev);
-}
-return false;
-}
#ifdef CONFIG_PCI_IOV
/**
@@ -843,7 +814,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
 * assigned. Setting the number of VFs to 0 through sysfs is caught
 * before this function ever gets called.
 */
-if (!i40e_vfs_are_assigned(pf)) {
+if (!pci_vfs_assigned(pf->pdev)) {
pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
@@ -980,7 +951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
return i40e_pci_sriov_enable(pdev, num_vfs);
-if (!i40e_vfs_are_assigned(pf)) {
+if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
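For reference, a hedged kernel-context sketch (not standalone, helper name hypothetical) of how pci_vfs_assigned() from linux/pci.h replaces the driver-local walk; it returns the number of VFs currently assigned to guests:

#include <linux/pci.h>

/* hypothetical helper: tear down SR-IOV only when no VF is passed through */
static void example_teardown_sriov(struct pci_dev *pdev)
{
    if (pci_vfs_assigned(pdev))
        dev_warn(&pdev->dev,
                 "Unable to free VFs because some are assigned to VMs.\n");
    else
        pci_disable_sriov(pdev);
}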
@@ -2098,7 +2069,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* Force the VF driver stop so it has to reload with new MAC address */
i40e_vc_disable_vf(pf, vf);
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
-ret = 0;
error_param:
return ret;
@@ -2423,7 +2393,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 *
 * Enable or disable VF spoof checking
 **/
-int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
...
@@ -122,7 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
-int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
...
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.0.1"
+#define DRV_VERSION "1.0.5"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
...
@@ -1086,6 +1086,11 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
return;
}
+/* At this point, we do not have MSI-X capabilities. We need to
+ * reconfigure or disable various features which require MSI-X
+ * capability.
+ */
/* disable DCB if number of TCs exceeds 1 */
if (netdev_get_num_tc(adapter->netdev) > 1) {
e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
@@ -1107,6 +1112,9 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
/* disable RSS */
adapter->ring_feature[RING_F_RSS].limit = 1;
+/* recalculate number of queues now that many features have been
+ * changed or disabled.
+ */
ixgbe_set_num_queues(adapter);
adapter->num_q_vectors = 1;
...
@@ -6319,25 +6319,55 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
ixgbe_ping_all_vfs(adapter);
}
-/**
- * ixgbe_watchdog_flush_tx - flush queues on link down
- * @adapter: pointer to the device adapter structure
- **/
-static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
+static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
{
int i;
-int some_tx_pending = 0;
-if (!netif_carrier_ok(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
-if (tx_ring->next_to_use != tx_ring->next_to_clean) {
-some_tx_pending = 1;
-break;
+if (tx_ring->next_to_use != tx_ring->next_to_clean)
+return true;
}
+return false;
+}
+static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
+{
+struct ixgbe_hw *hw = &adapter->hw;
+struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+int i, j;
+if (!adapter->num_vfs)
+return false;
+for (i = 0; i < adapter->num_vfs; i++) {
+for (j = 0; j < q_per_pool; j++) {
+u32 h, t;
+h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
+t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
+if (h != t)
+return true;
+}
+}
-if (some_tx_pending) {
+return false;
+}
+/**
+ * ixgbe_watchdog_flush_tx - flush queues on link down
+ * @adapter: pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
+{
+if (!netif_carrier_ok(adapter->netdev)) {
+if (ixgbe_ring_tx_pending(adapter) ||
+ixgbe_vf_tx_pending(adapter)) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
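A worked example for the q_per_pool computation in ixgbe_vf_tx_pending() above. __ALIGN_MASK() is reproduced from linux/kernel.h; the VMDq mask value is hypothetical, describing a layout with two queues per pool:

#include <stdio.h>
#include <stdint.h>

#define __ALIGN_MASK(x, mask)  (((x) + (mask)) & ~(mask))   /* as in linux/kernel.h */

int main(void)
{
    uint32_t vmdq_mask = 0x7E;   /* hypothetical: lowest bit free -> 2 queues per pool */
    uint32_t q_per_pool = __ALIGN_MASK(1, ~vmdq_mask);

    printf("queues per pool: %u\n", q_per_pool);   /* prints 2 */
    return 0;
}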
@@ -7741,39 +7771,13 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr,
u16 flags)
{
-struct ixgbe_adapter *adapter = netdev_priv(dev);
-int err;
-if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
-return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
-/* Hardware does not support aging addresses so if a
- * ndm_state is given only allow permanent addresses
- */
-if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
-pr_info("%s: FDB only supports static addresses\n",
-ixgbe_driver_name);
-return -EINVAL;
-}
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
-u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
-if (netdev_uc_count(dev) < rar_uc_entries)
-err = dev_uc_add_excl(dev, addr);
-else
-err = -ENOMEM;
-} else if (is_multicast_ether_addr(addr)) {
-err = dev_mc_add_excl(dev, addr);
-} else {
-err = -EINVAL;
+if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
+return -ENOMEM;
}
-/* Only return duplicate errors if NLM_F_EXCL is set */
-if (err == -EEXIST && !(flags & NLM_F_EXCL))
-err = 0;
-return err;
+return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
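Assembled from the context and added lines above, the rewritten handler reduces to the following (kernel context, shown for reference): the unicast bounds check now runs whether or not SR-IOV is active, and everything else defers to ndo_dflt_fdb_add().

static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
                             const unsigned char *addr,
                             u16 flags)
{
    /* guarantee we can provide a unique filter for the unicast address */
    if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
        if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
            return -ENOMEM;
    }

    return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
}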
@@ -7836,9 +7840,17 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
struct ixgbe_fwd_adapter *fwd_adapter = NULL;
struct ixgbe_adapter *adapter = netdev_priv(pdev);
+int used_pools = adapter->num_vfs + adapter->num_rx_pools;
unsigned int limit;
int pool, err;
+/* Hardware has a limited number of available pools. Each VF, and the
+ * PF require a pool. Check to ensure we don't attempt to use more
+ * then the available number of pools.
+ */
+if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
+return ERR_PTR(-EINVAL);
#ifdef CONFIG_RPS
if (vdev->num_rx_queues != vdev->num_tx_queues) {
netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
...
@@ -250,13 +250,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
if (err)
return err;
-/* While the SR-IOV capability structure reports total VFs to be
- * 64 we limit the actual number that can be allocated to 63 so
- * that some transmit/receive resources can be reserved to the
- * PF. The PCI bus driver already checks for other values out of
- * range.
+/* While the SR-IOV capability structure reports total VFs to be 64,
+ * we have to limit the actual number allocated based on two factors.
+ * First, we reserve some transmit/receive resources for the PF.
+ * Second, VMDQ also uses the same pools that SR-IOV does. We need to
+ * account for this, so that we don't accidentally allocate more VFs
+ * than we have available pools. The PCI bus driver already checks for
+ * other values out of range.
 */
-if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT)
+if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
return -EPERM;
adapter->num_vfs = num_vfs;
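A toy check mirroring the two guards above, with illustrative counts: SR-IOV VFs and MACVLAN offload pools draw from the same 64 hardware pools (IXGBE_MAX_VF_FUNCTIONS), so the budget is shared:

#include <stdio.h>

#define IXGBE_MAX_VF_FUNCTIONS  64   /* total hardware pools */

int main(void)
{
    int num_vfs = 60;        /* illustrative: VFs requested via sysfs */
    int num_rx_pools = 4;    /* illustrative: pools held by the PF and MACVLANs */

    if ((num_vfs + num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
        printf("sriov enable rejected: -EPERM\n");
    else if ((num_vfs + num_rx_pools) >= IXGBE_MAX_VF_FUNCTIONS)
        printf("no pool left for another accelerated MACVLAN: -EINVAL\n");
    else
        printf("%d pool(s) still free\n",
               IXGBE_MAX_VF_FUNCTIONS - num_vfs - num_rx_pools);
    return 0;
}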
...
@@ -2194,6 +2194,8 @@ enum {
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
/* Translated register #defines */
+#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
+#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
@@ -2202,6 +2204,11 @@ enum {
#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
(IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \
+(IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \
+(IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
IXGBE_FDIR_PBALLOC_64K = 1,
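A worked example for the translated register macros added above: with two queues per pool, VF 3's queue 1 maps to ring index 2*3 + 1 = 7, i.e. offset 0x06010 + 0x40*7 = 0x061d0:

#include <stdio.h>

#define IXGBE_PVFTDH(P)  (0x06010 + (0x40 * (P)))
#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \
    (IXGBE_PVFTDH((q_per_pool) * (vf_number) + (vf_q_index)))

int main(void)
{
    printf("PVFTDH for VF 3, queue 1: 0x%05x\n", IXGBE_PVFTDHN(2, 3, 1));
    return 0;
}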
...
@@ -434,6 +434,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
+/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+if (mac->type == ixgbe_mac_82599_vf) {
+int i;
+for (i = 0; i < 5; i++) {
+udelay(100);
+links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+if (!(links_reg & IXGBE_LINKS_UP))
+goto out;
+}
+}
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
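A user-space sketch of the debounce added above, with canned samples standing in for IXGBE_READ_REG(hw, IXGBE_VFLINKS): five polls 100 usecs apart must all report link up (roughly the 500 usecs the 82599 VFLINKS bit needs to settle), otherwise the link is treated as down:

#include <stdbool.h>
#include <stdio.h>

#define IXGBE_LINKS_UP  0x40000000   /* as in ixgbe_type.h */

/* hypothetical samples: the link flaps once while a DA cable is reseated */
static const unsigned samples[5] = {
    IXGBE_LINKS_UP, IXGBE_LINKS_UP, 0, IXGBE_LINKS_UP, IXGBE_LINKS_UP,
};

int main(void)
{
    bool link_up = true;
    int i;

    for (i = 0; i < 5; i++) {
        /* driver: udelay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); */
        if (!(samples[i] & IXGBE_LINKS_UP)) {
            link_up = false;   /* one bad reading within 500 usecs -> link down */
            break;
        }
    }
    printf("link reported %s\n", link_up ? "up" : "down");
    return 0;
}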
...