Commit a6992ebe authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-09-29

This series contains updates to i40e and i40evf only.

Jake provides several of the changes, starting with the renaming of a
variable to clarify what the value is actually calculating. Found we
were misusing the __I40E_RESET_RECOVERY_PENDING bit to determine when
we should actually request a new IRQ in i40e_setup_misc_vector(), which
led to a design mistake, so to resolve the issue, use a separate
state bit for miscellaneous IRQ setup and fix up the design while we
are at it.  Cleaned up the old legacy PM support in the driver since
we support the newer generic PM callbacks.  Fixed a failure to
hibernate, where on some platforms with a large number of CPUs we
would allocate many IRQ vectors and then try to migrate them all to
CPU0 when hibernating.
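
In outline, the new state bit makes the IRQ request idempotent across
resets (a condensed sketch of the i40e_setup_misc_vector() and
i40e_free_misc_vector() hunks below, not new code):

	/* Only request the IRQ the first time through; reset recovery
	 * paths see the bit already set and skip request_irq().
	 */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err)
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
	}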

Sudheer cleans up a check for an unqualified module inside
i40e_up_complete(), because the link state information is in flux at
that point, so messages were being logged with incorrect link state
information.  Also provides additional log message cleanups and
simplifies member variable access in the printing of the link messages.
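
The member access simplification amounts to caching the back-pointer
once, as in the i40e_print_link_message() hunks below:

	struct i40e_pf *pf = vsi->back;

	/* pf->hw.* replaces the repeated vsi->back->hw.* chains */
	new_speed = pf->hw.phy.link_info.link_speed;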

Mariusz relaxes the firmware check, since Fortville and Fort Park NICs
can and do have different firmware versions, so only warn for older
Fortville firmware.  Fixed an erratum where a flow director statistic
was not wrapping as expected; it is now simply reset after each read.
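
The fix is visible in miniature in the new helper below: the register is
cleared on every read, so the 32-bit hardware counter can no longer wrap
unnoticed, and accumulation happens in the 64-bit software stat instead
(condensed from the i40e_main.c hunk):

	static void i40e_stat_update_and_clear32(struct i40e_hw *hw,
						 u32 reg, u64 *stat)
	{
		u32 new_data = rd32(hw, reg);

		wr32(hw, reg, 1); /* nonzero write clears the register */
		*stat += new_data;
	}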

Mitch prevents consternation by lowering the log level to debug on a
message seen regularly on VF reset or unload, which is meaningless under
normal circumstances.  Refactored the firmware version checking, since
Fortville and Fort Park devices can have different firmware versions.
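
In sketch form, the refactor keys the expected minor API version off the
MAC type with a single macro (from the i40e_adminq_cmd.h hunks below):

	#define I40E_FW_API_VERSION_MINOR_X722	0x0005
	#define I40E_FW_API_VERSION_MINOR_X710	0x0007

	#define I40E_FW_MINOR_VERSION(_h) \
		((_h)->mac.type == I40E_MAC_XL710 ? \
			I40E_FW_API_VERSION_MINOR_X710 : \
			I40E_FW_API_VERSION_MINOR_X722)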

Alan fixes the ring-to-vector mapping, where the past implementation
attempted to map each Tx and Rx ring to its own vector; however, we use
combined queues, so we should map the Tx/Rx rings together on one
vector.  Also adds the ability for the VF to request a different number
of queues to be allocated to it.
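
The new mapping reduces to a single round-robin loop (condensed from the
i40evf_map_rings_to_vectors() hunk below):

	/* Map the Tx/Rx rings of queue pair 'ridx' to one vector; with
	 * more queue pairs than vectors, wrap and continue round-robin.
	 */
	for (; ridx < rings_remaining; ridx++) {
		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
		i40evf_map_vector_to_txq(adapter, vidx, ridx);

		if (++vidx >= q_vectors)
			vidx = 0;
	}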
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 45c1fd61 22b96551
@@ -77,6 +77,7 @@
 #define i40e_default_queues_per_vmdq(pf) \
 	(((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF	4
+#define I40E_MAX_VF_QUEUES		16
 #define I40E_DEFAULT_QUEUES_PER_TC	1 /* should be a power of 2 */
 #define i40e_pf_get_max_q_per_tc(pf) \
 	(((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
@@ -136,6 +137,7 @@ enum i40e_state_t {
 	__I40E_MDD_EVENT_PENDING,
 	__I40E_VFLR_EVENT_PENDING,
 	__I40E_RESET_RECOVERY_PENDING,
+	__I40E_MISC_IRQ_REQUESTED,
 	__I40E_RESET_INTR_RECEIVED,
 	__I40E_REINIT_REQUESTED,
 	__I40E_PF_RESET_REQUESTED,
...
@@ -34,7 +34,15 @@
  */

 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0005
+#define I40E_FW_API_VERSION_MINOR_X722	0x0005
+#define I40E_FW_API_VERSION_MINOR_X710	0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+					I40E_FW_API_VERSION_MINOR_X710 : \
+					I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007

 struct i40e_aq_desc {
 	__le16 flags;
...
@@ -1593,8 +1593,10 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 		status = I40E_ERR_UNKNOWN_PHY;

 	if (report_init) {
-		hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
-		hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+		if (hw->mac.type == I40E_MAC_XL710 &&
+		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)
+			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 	}

 	return status;
...
@@ -599,6 +599,20 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
 		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }

+/**
+ * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read and clear
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
+{
+	u32 new_data = rd32(hw, reg);
+
+	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
+	*stat += new_data;
+}
+
 /**
  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
  * @vsi: the VSI to be updated
@@ -1040,18 +1054,15 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 			   &osd->rx_jabber, &nsd->rx_jabber);

 	/* FDIR stats */
-	i40e_stat_update32(hw,
-		      I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
-		      pf->stat_offsets_loaded,
-		      &osd->fd_atr_match, &nsd->fd_atr_match);
-	i40e_stat_update32(hw,
-		      I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
-		      pf->stat_offsets_loaded,
-		      &osd->fd_sb_match, &nsd->fd_sb_match);
-	i40e_stat_update32(hw,
-		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
-		      pf->stat_offsets_loaded,
-		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
+	i40e_stat_update_and_clear32(hw,
+			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
+			&nsd->fd_atr_match);
+	i40e_stat_update_and_clear32(hw,
+			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
+			&nsd->fd_sb_match);
+	i40e_stat_update_and_clear32(hw,
+			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
+			&nsd->fd_atr_tunnel_match);

 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
 	nsd->tx_lpi_status =
@@ -3593,14 +3604,20 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
 }

 /**
- * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * i40e_free_misc_vector - Free the vector that handles non-queue events
  * @pf: board private structure
  **/
-static void i40e_stop_misc_vector(struct i40e_pf *pf)
+static void i40e_free_misc_vector(struct i40e_pf *pf)
 {
 	/* Disable ICR 0 */
 	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
 	i40e_flush(&pf->hw);
+
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+		synchronize_irq(pf->msix_entries[0].vector);
+		free_irq(pf->msix_entries[0].vector, pf);
+		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
+	}
 }

 /**
@@ -4455,11 +4472,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
 	int i;

-	i40e_stop_misc_vector(pf);
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
-		synchronize_irq(pf->msix_entries[0].vector);
-		free_irq(pf->msix_entries[0].vector, pf);
-	}
+	i40e_free_misc_vector(pf);

 	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
 		      I40E_IWARP_IRQ_PILE_ID);
@@ -5346,13 +5359,14 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
 	enum i40e_aq_link_speed new_speed;
+	struct i40e_pf *pf = vsi->back;
 	char *speed = "Unknown";
 	char *fc = "Unknown";
 	char *fec = "";
 	char *req_fec = "";
 	char *an = "";

-	new_speed = vsi->back->hw.phy.link_info.link_speed;
+	new_speed = pf->hw.phy.link_info.link_speed;

 	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
 		return;
@@ -5366,13 +5380,13 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 	/* Warn user if link speed on NPAR enabled partition is not at
 	 * least 10GB
 	 */
-	if (vsi->back->hw.func_caps.npar_enable &&
-	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
-	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+	if (pf->hw.func_caps.npar_enable &&
+	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
 		netdev_warn(vsi->netdev,
 			    "The partition detected link speed that is less than 10Gbps\n");

-	switch (vsi->back->hw.phy.link_info.link_speed) {
+	switch (pf->hw.phy.link_info.link_speed) {
 	case I40E_LINK_SPEED_40GB:
 		speed = "40 G";
 		break;
@@ -5395,7 +5409,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 		break;
 	}

-	switch (vsi->back->hw.fc.current_mode) {
+	switch (pf->hw.fc.current_mode) {
 	case I40E_FC_FULL:
 		fc = "RX/TX";
 		break;
@@ -5410,18 +5424,18 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 		break;
 	}

-	if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
 		req_fec = ", Requested FEC: None";
 		fec = ", FEC: None";
 		an = ", Autoneg: False";

-		if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
 			an = ", Autoneg: True";

-		if (vsi->back->hw.phy.link_info.fec_info &
+		if (pf->hw.phy.link_info.fec_info &
 		    I40E_AQ_CONFIG_FEC_KR_ENA)
 			fec = ", FEC: CL74 FC-FEC/BASE-R";
-		else if (vsi->back->hw.phy.link_info.fec_info &
+		else if (pf->hw.phy.link_info.fec_info &
 			 I40E_AQ_CONFIG_FEC_RS_ENA)
 			fec = ", FEC: CL108 RS-FEC";
@@ -5470,15 +5484,6 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 		i40e_print_link_message(vsi, true);
 		netif_tx_start_all_queues(vsi->netdev);
 		netif_carrier_on(vsi->netdev);
-	} else if (vsi->netdev) {
-		i40e_print_link_message(vsi, false);
-		/* need to check for qualified module here*/
-		if ((pf->hw.phy.link_info.link_info &
-		     I40E_AQ_MEDIA_AVAILABLE) &&
-		    (!(pf->hw.phy.link_info.an_info &
-		       I40E_AQ_QUALIFIED_MODULE)))
-			netdev_err(vsi->netdev,
-				   "the driver failed to link because an unqualified module was detected.");
 	}

 	/* replay FDIR SB filters */
@@ -6429,7 +6434,6 @@ static void i40e_link_event(struct i40e_pf *pf)
 		     new_link == netif_carrier_ok(vsi->netdev)))
 		return;

-	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
-		i40e_print_link_message(vsi, new_link);
+	i40e_print_link_message(vsi, new_link);

 	/* Notify the base of the switch tree connected to
@@ -8350,6 +8354,57 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 	return 0;
 }

+#ifdef CONFIG_PM
+/**
+ * i40e_restore_interrupt_scheme - Restore the interrupt scheme
+ * @pf: private board data structure
+ *
+ * Restore the interrupt scheme that was cleared when we suspended the
+ * device. This should be called during resume to re-allocate the q_vectors
+ * and reacquire IRQs.
+ */
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
+{
+	int err, i;
+
+	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
+	 * scheme. We need to re-enable them here in order to attempt to
+	 * re-acquire the MSI or MSI-X vectors
+	 */
+	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+
+	err = i40e_init_interrupt_scheme(pf);
+	if (err)
+		return err;
+
+	/* Now that we've re-acquired IRQs, we need to remap the vectors and
+	 * rings together again.
+	 */
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		if (pf->vsi[i]) {
+			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
+			if (err)
+				goto err_unwind;
+			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
+		}
+	}
+
+	err = i40e_setup_misc_vector(pf);
+	if (err)
+		goto err_unwind;
+
+	return 0;
+
+err_unwind:
+	while (i--) {
+		if (pf->vsi[i])
+			i40e_vsi_free_q_vectors(pf->vsi[i]);
+	}
+
+	return err;
+}
+#endif /* CONFIG_PM */
+
 /**
  * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
  * @pf: board private structure
@@ -8363,13 +8418,12 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 	struct i40e_hw *hw = &pf->hw;
 	int err = 0;

-	/* Only request the irq if this is the first time through, and
-	 * not when we're rebuilding after a Reset
-	 */
-	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
+	/* Only request the IRQ once, the first time through. */
+	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
 		err = request_irq(pf->msix_entries[0].vector,
 				  i40e_intr, 0, pf->int_name, pf);
 		if (err) {
+			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
 			dev_info(&pf->pdev->dev,
 				 "request_irq for %s failed: %d\n",
 				 pf->int_name, err);
@@ -11380,11 +11434,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		 i40e_nvm_version_str(hw));

 	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
-	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
 		dev_info(&pdev->dev,
 			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
-	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
-		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
 		dev_info(&pdev->dev,
 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
@@ -12048,18 +12101,25 @@ static void i40e_shutdown(struct pci_dev *pdev)
 #ifdef CONFIG_PM
 /**
- * i40e_suspend - PCI callback for moving to D3
- * @pdev: PCI device information struct
+ * i40e_suspend - PM callback for moving to D3
+ * @dev: generic device information structure
  **/
-static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i40e_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_hw *hw = &pf->hw;
-	int retval = 0;

-	set_bit(__I40E_SUSPENDED, pf->state);
+	/* If we're already suspended, then there is nothing to do */
+	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
+		return 0;
+
 	set_bit(__I40E_DOWN, pf->state);

+	/* Ensure service task will not be running */
+	del_timer_sync(&pf->service_timer);
+	cancel_work_sync(&pf->service_task);
+
 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
 		i40e_enable_mc_magic_wake(pf);
@@ -12068,81 +12128,72 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

-	i40e_stop_misc_vector(pf);
-	if (pf->msix_entries) {
-		synchronize_irq(pf->msix_entries[0].vector);
-		free_irq(pf->msix_entries[0].vector, pf);
-	}
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
-	pci_wake_from_d3(pdev, pf->wol_en);
-	pci_set_power_state(pdev, PCI_D3hot);
+	/* Clear the interrupt scheme and release our IRQs so that the system
+	 * can safely hibernate even when there are a large number of CPUs.
+	 * Otherwise hibernation might fail when mapping all the vectors back
+	 * to CPU0.
+	 */
+	i40e_clear_interrupt_scheme(pf);

-	return retval;
+	return 0;
 }

 /**
- * i40e_resume - PCI callback for waking up from D3
- * @pdev: PCI device information struct
+ * i40e_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
  **/
-static int i40e_resume(struct pci_dev *pdev)
+static int i40e_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
-	u32 err;
+	int err;

-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/* pci_restore_state() clears dev->state_saves, so
-	 * call pci_save_state() again to restore it.
-	 */
-	pci_save_state(pdev);
+	/* If we're not suspended, then there is nothing to do */
+	if (!test_bit(__I40E_SUSPENDED, pf->state))
+		return 0;

-	err = pci_enable_device_mem(pdev);
+	/* We cleared the interrupt scheme when we suspended, so we need to
+	 * restore it now to resume device functionality.
+	 */
+	err = i40e_restore_interrupt_scheme(pf);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
-		return err;
+		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+			err);
 	}
-	pci_set_master(pdev);

-	/* no wakeup events while running */
-	pci_wake_from_d3(pdev, false);
-
-	/* handling the reset will rebuild the device state */
-	if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
-		clear_bit(__I40E_DOWN, pf->state);
-		if (pf->msix_entries) {
-			err = request_irq(pf->msix_entries[0].vector,
-					  i40e_intr, 0, pf->int_name, pf);
-			if (err) {
-				dev_err(&pf->pdev->dev,
-					"request_irq for %s failed: %d\n",
-					pf->int_name, err);
-			}
-		}
-		i40e_reset_and_rebuild(pf, false, false);
-	}
+	clear_bit(__I40E_DOWN, pf->state);
+	i40e_reset_and_rebuild(pf, false, false);
+
+	/* Clear suspended state last after everything is recovered */
+	clear_bit(__I40E_SUSPENDED, pf->state);
+
+	/* Restart the service task */
+	mod_timer(&pf->service_timer,
+		  round_jiffies(jiffies + pf->service_timer_period));

 	return 0;
 }

-#endif
+#endif /* CONFIG_PM */

 static const struct pci_error_handlers i40e_err_handler = {
 	.error_detected = i40e_pci_error_detected,
 	.slot_reset = i40e_pci_error_slot_reset,
 	.resume = i40e_pci_error_resume,
 };

+static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+
 static struct pci_driver i40e_driver = {
 	.name = i40e_driver_name,
 	.id_table = i40e_pci_tbl,
 	.probe = i40e_probe,
 	.remove = i40e_remove,
 #ifdef CONFIG_PM
-	.suspend = i40e_suspend,
-	.resume = i40e_resume,
-#endif
+	.driver = {
+		.pm = &i40e_pm_ops,
+	},
+#endif /* CONFIG_PM */
 	.shutdown = i40e_shutdown,
 	.err_handler = &i40e_err_handler,
 	.sriov_configure = i40e_pci_sriov_configure,
...
@@ -960,14 +960,14 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
 	enum i40e_latency_range new_latency_range = rc->latency_range;
 	u32 new_itr = rc->itr;
-	int bytes_per_int;
+	int bytes_per_usec;
 	unsigned int usecs, estimated_usecs;

 	if (rc->total_packets == 0 || !rc->itr)
 		return false;

 	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
-	bytes_per_int = rc->total_bytes / usecs;
+	bytes_per_usec = rc->total_bytes / usecs;

 	/* The calculations in this algorithm depend on interrupts actually
 	 * firing at the ITR rate. This may not happen if the packet rate is
@@ -993,18 +993,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	 */
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
-		if (bytes_per_int > 10)
+		if (bytes_per_usec > 10)
 			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	case I40E_LOW_LATENCY:
-		if (bytes_per_int > 20)
+		if (bytes_per_usec > 20)
 			new_latency_range = I40E_BULK_LATENCY;
-		else if (bytes_per_int <= 10)
+		else if (bytes_per_usec <= 10)
 			new_latency_range = I40E_LOWEST_LATENCY;
 		break;
 	case I40E_BULK_LATENCY:
 	default:
-		if (bytes_per_int <= 20)
+		if (bytes_per_usec <= 20)
 			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
...
@@ -815,6 +815,14 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
 	 */
 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

+	/* It's possible the VF had requested more queues than the default so
+	 * do the accounting here when we're about to free them.
+	 */
+	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
+		pf->queues_left += vf->num_queue_pairs -
+				   I40E_DEFAULT_QUEUES_PER_VF;
+	}
+
 	/* free vsi & disconnect it from the parent uplink */
 	if (vf->lan_vsi_idx) {
 		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
@@ -868,12 +876,27 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
 	int total_queue_pairs = 0;
 	int ret;

+	if (vf->num_req_queues &&
+	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
+		pf->num_vf_qps = vf->num_req_queues;
+	else
+		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+
 	/* allocate hw vsi context & associated resources */
 	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 	if (ret)
 		goto error_alloc;
 	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

+	/* We account for each VF to get a default number of queue pairs. If
+	 * the VF has now requested more, we need to account for that to make
+	 * certain we never request more queues than we actually have left in
+	 * HW.
+	 */
+	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
+		pf->queues_left -=
+			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
+
 	if (vf->trusted)
 		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 	else
@@ -1579,6 +1602,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 			VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
 	}

+	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1986,6 +2012,52 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 				       aq_ret);
 }

+/**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. Will respond with either the number requested or the
+ * maximum we can support.
+ **/
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+{
+	struct virtchnl_vf_res_request *vfres =
+		(struct virtchnl_vf_res_request *)msg;
+	int req_pairs = vfres->num_queue_pairs;
+	int cur_pairs = vf->num_queue_pairs;
+	struct i40e_pf *pf = vf->pf;
+
+	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+		return -EINVAL;
+
+	if (req_pairs <= 0) {
+		dev_err(&pf->pdev->dev,
+			"VF %d tried to request %d queues. Ignoring.\n",
+			vf->vf_id, req_pairs);
+	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
+		dev_err(&pf->pdev->dev,
+			"VF %d tried to request more than %d queues.\n",
+			vf->vf_id,
+			I40E_MAX_VF_QUEUES);
+		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
+	} else if (req_pairs - cur_pairs > pf->queues_left) {
+		dev_warn(&pf->pdev->dev,
+			 "VF %d requested %d more queues, but only %d left.\n",
+			 vf->vf_id,
+			 req_pairs - cur_pairs,
+			 pf->queues_left);
+		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+	} else {
+		vf->num_req_queues = req_pairs;
+	}
+
+	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+				      (u8 *)vfres, sizeof(vfres));
+}
+
 /**
  * i40e_vc_get_stats_msg
  * @vf: pointer to the VF info
@@ -2708,6 +2780,9 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
 		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
 		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+		break;
 	case VIRTCHNL_OP_UNKNOWN:
 	default:
...
@@ -97,6 +97,7 @@ struct i40e_vf {
 	u16 lan_vsi_id;		/* ID as used by firmware */

 	u8 num_queue_pairs;	/* num of qps assigned to VF vsis */
+	u8 num_req_queues;	/* num of requested qps */
 	u64 num_mdd_events;	/* num of mdd events detected */
 	/* num of continuous malformed or invalid msgs detected */
 	u64 num_invalid_msgs;
...
@@ -34,7 +34,15 @@
  */

 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0005
+#define I40E_FW_API_VERSION_MINOR_X722	0x0005
+#define I40E_FW_API_VERSION_MINOR_X710	0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+					I40E_FW_API_VERSION_MINOR_X710 : \
+					I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007

 struct i40e_aq_desc {
 	__le16 flags;
...
@@ -358,14 +358,14 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
 	enum i40e_latency_range new_latency_range = rc->latency_range;
 	u32 new_itr = rc->itr;
-	int bytes_per_int;
+	int bytes_per_usec;
 	unsigned int usecs, estimated_usecs;

 	if (rc->total_packets == 0 || !rc->itr)
 		return false;

 	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
-	bytes_per_int = rc->total_bytes / usecs;
+	bytes_per_usec = rc->total_bytes / usecs;

 	/* The calculations in this algorithm depend on interrupts actually
 	 * firing at the ITR rate. This may not happen if the packet rate is
@@ -391,18 +391,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 	 */
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
-		if (bytes_per_int > 10)
+		if (bytes_per_usec > 10)
 			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	case I40E_LOW_LATENCY:
-		if (bytes_per_int > 20)
+		if (bytes_per_usec > 20)
 			new_latency_range = I40E_BULK_LATENCY;
-		else if (bytes_per_int <= 10)
+		else if (bytes_per_usec <= 10)
 			new_latency_range = I40E_LOWEST_LATENCY;
 		break;
 	case I40E_BULK_LATENCY:
 	default:
-		if (bytes_per_int <= 20)
+		if (bytes_per_usec <= 20)
 			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
...
@@ -46,7 +46,7 @@ static const char i40evf_driver_string[] =
 #define DRV_VERSION_MAJOR 3
 #define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 0
+#define DRV_VERSION_BUILD 1
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
@@ -432,52 +432,24 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
 **/
 static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
 {
+	int rings_remaining = adapter->num_active_queues;
+	int ridx = 0, vidx = 0;
 	int q_vectors;
-	int v_start = 0;
-	int rxr_idx = 0, txr_idx = 0;
-	int rxr_remaining = adapter->num_active_queues;
-	int txr_remaining = adapter->num_active_queues;
-	int i, j;
-	int rqpv, tqpv;
 	int err = 0;

 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

-	/* The ideal configuration...
-	 * We have enough vectors to map one per queue.
-	 */
-	if (q_vectors >= (rxr_remaining * 2)) {
-		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
-			i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
-
-		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
-			i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
-		goto out;
-	}
+	for (; ridx < rings_remaining; ridx++) {
+		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
+		i40evf_map_vector_to_txq(adapter, vidx, ridx);

-	/* If we don't have enough vectors for a 1-to-1
-	 * mapping, we'll have to group them so there are
-	 * multiple queues per vector.
-	 * Re-adjusting *qpv takes care of the remainder.
-	 */
-	for (i = v_start; i < q_vectors; i++) {
-		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
-		for (j = 0; j < rqpv; j++) {
-			i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
-			rxr_idx++;
-			rxr_remaining--;
-		}
-	}
-	for (i = v_start; i < q_vectors; i++) {
-		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
-		for (j = 0; j < tqpv; j++) {
-			i40evf_map_vector_to_txq(adapter, i, txr_idx);
-			txr_idx++;
-			txr_remaining--;
-		}
+		/* In the case where we have more queues than vectors, continue
+		 * round-robin on vectors until all queues are mapped.
+		 */
+		if (++vidx >= q_vectors)
+			vidx = 0;
 	}

-out:
 	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

 	return err;
...
@@ -52,7 +52,7 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
 	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
 	if (err)
-		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
 			op, i40evf_stat_str(hw, err),
 			i40evf_aq_str(hw, hw->aq.asq_last_status));
 	return err;
...
@@ -135,6 +135,7 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_SET_RSS_HENA = 26,
 	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
 	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+	VIRTCHNL_OP_REQUEST_QUEUES = 29,
 };
/* This macro is used to generate a compilation error if a structure /* This macro is used to generate a compilation error if a structure
...@@ -235,6 +236,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); ...@@ -235,6 +236,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
@@ -325,6 +327,21 @@ struct virtchnl_vsi_queue_config_info {
 	struct virtchnl_queue_pair_info qpair[1];
 };

+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support; otherwise it will respond with the
+ * number requested.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+	u16 num_queue_pairs;
+};
+
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);

 /* VIRTCHNL_OP_CONFIG_IRQ_MAP
@@ -691,6 +708,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
 		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		valid_len = sizeof(struct virtchnl_vf_res_request);
+		break;
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
...