Commit 2018b22a authored by David S. Miller's avatar David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-09-06 (i40e, iavf)

This series contains updates to i40e and iavf drivers.

Stanislaw adds support for new device id for i40e.

Jaroslaw tidies up some code around MSI-X configuration by adding/
reworking comments and introducing a couple of macros for i40e.

Michal resolves races between reset and close in the iavf driver: pending
AdminQ operations are deferred or deleted, and filter additions/deletions
are reworked so they are handled safely while these operations are in flight.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 29796143 11c12adc
...@@ -399,6 +399,20 @@ struct i40e_ddp_old_profile_list { ...@@ -399,6 +399,20 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \ I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
I40E_FLEX_56_MASK | I40E_FLEX_57_MASK) I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
/* Build a QINT_TQCTL (TX queue interrupt control) register value:
 * enables the interrupt cause, selects the TX ITR index, routes the
 * queue to MSI-X @vector, and links to the next queue. Note @qp is the
 * NEXT queue index (it is shifted into NEXTQ_INDX), and @nextq_type is
 * token-pasted onto I40E_QUEUE_TYPE_ (so callers pass e.g. TX or RX).
 */
#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
	(I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
	(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
	((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
	((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
	(I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
/* Build a QINT_RQCTL (RX queue interrupt control) register value:
 * enables the interrupt cause, selects the RX ITR index, routes the
 * queue to MSI-X @vector, and links to the next queue. Note @qp is the
 * NEXT queue index (it is shifted into NEXTQ_INDX), and @nextq_type is
 * token-pasted onto I40E_QUEUE_TYPE_ (so callers pass e.g. TX or RX).
 */
#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
	(I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
	(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
	((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
	((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
	(I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
struct i40e_flex_pit { struct i40e_flex_pit {
struct list_head list; struct list_head list;
u16 src_offset; u16 src_offset;
......
...@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) ...@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T4:
...@@ -4974,6 +4975,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, ...@@ -4974,6 +4975,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
status = i40e_write_phy_register_clause22(hw, reg, phy_addr, status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value); value);
break; break;
case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T4:
...@@ -5012,6 +5014,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, ...@@ -5012,6 +5014,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
status = i40e_read_phy_register_clause22(hw, reg, phy_addr, status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value); value);
break; break;
case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T4:
......
...@@ -24,8 +24,10 @@ ...@@ -24,8 +24,10 @@
#define I40E_DEV_ID_10G_B 0x104F #define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E #define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_5G_BASE_T_BC 0x101F #define I40E_DEV_ID_5G_BASE_T_BC 0x101F
#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
#define I40E_IS_X710TL_DEVICE(d) \ #define I40E_IS_X710TL_DEVICE(d) \
(((d) == I40E_DEV_ID_5G_BASE_T_BC) || \ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
((d) == I40E_DEV_ID_10G_BASE_T_BC)) ((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE #define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_QSFP_X722 0x37CF
......
...@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { ...@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
...@@ -3878,7 +3879,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) ...@@ -3878,7 +3879,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_RATEN(vector - 1), wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit)); i40e_intrl_usec_to_reg(vsi->int_rate_limit));
/* Linked list for the queuepairs assigned to this vector */ /* begin of linked list for RX queue assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) { for (q = 0; q < q_vector->num_ringpairs; q++) {
u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
...@@ -3894,6 +3895,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) ...@@ -3894,6 +3895,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_RQCTL(qp), val); wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) { if (has_xdp) {
/* TX queue with next queue set to TX */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
...@@ -3903,7 +3905,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) ...@@ -3903,7 +3905,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_TQCTL(nextqp), val); wr32(hw, I40E_QINT_TQCTL(nextqp), val);
} }
/* TX queue with next RX or end of linked list */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
...@@ -3972,7 +3974,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) ...@@ -3972,7 +3974,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[0]; struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
u32 val;
/* set the ITR configuration */ /* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1; q_vector->rx.next_update = jiffies + 1;
...@@ -3989,28 +3990,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) ...@@ -3989,28 +3990,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0); wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the queue int */ /* Associate the queue pair to the vector and enable the queue
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | * interrupt RX queue in linked list with next queue set to TX
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | */
(nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_RQCTL(0), val);
if (i40e_enabled_xdp_vsi(vsi)) { if (i40e_enabled_xdp_vsi(vsi)) {
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | /* TX queue in linked list with next queue set to TX */
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)| wr32(hw, I40E_QINT_TQCTL(nextqp),
(I40E_QUEUE_TYPE_TX I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
<< I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
wr32(hw, I40E_QINT_TQCTL(nextqp), val);
} }
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | /* last TX queue so the next RX queue doesn't matter */
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | wr32(hw, I40E_QINT_TQCTL(0),
(I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
wr32(hw, I40E_QINT_TQCTL(0), val);
i40e_flush(hw); i40e_flush(hw);
} }
......
...@@ -1270,66 +1270,138 @@ static void iavf_up_complete(struct iavf_adapter *adapter) ...@@ -1270,66 +1270,138 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
} }
/** /**
* iavf_down - Shutdown the connection processing * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
* yet and mark other to be removed.
* @adapter: board private structure * @adapter: board private structure
*
* Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/ **/
void iavf_down(struct iavf_adapter *adapter) static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_vlan_filter *vlf; struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf;
struct iavf_fdir_fltr *fdir;
struct iavf_mac_filter *f;
struct iavf_adv_rss *rss;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
iavf_irq_disable(adapter);
spin_lock_bh(&adapter->mac_vlan_list_lock); spin_lock_bh(&adapter->mac_vlan_list_lock);
/* clear the sync flag on all filters */ /* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL); __dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL); __dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */ /* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) { list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
list) {
if (f->add) {
list_del(&f->list);
kfree(f);
} else {
f->remove = true; f->remove = true;
} }
}
/* remove all VLAN filters */ /* remove all VLAN filters */
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
list) {
if (vlf->add) {
list_del(&vlf->list);
kfree(vlf);
} else {
vlf->remove = true; vlf->remove = true;
} }
}
spin_unlock_bh(&adapter->mac_vlan_list_lock); spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
* iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
* mark other to be removed.
* @adapter: board private structure
**/
static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
{
struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */ /* remove all cloud filters */
spin_lock_bh(&adapter->cloud_filter_list_lock); spin_lock_bh(&adapter->cloud_filter_list_lock);
list_for_each_entry(cf, &adapter->cloud_filter_list, list) { list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
list) {
if (cf->add) {
list_del(&cf->list);
kfree(cf);
adapter->num_cloud_filters--;
} else {
cf->del = true; cf->del = true;
} }
}
spin_unlock_bh(&adapter->cloud_filter_list_lock); spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
* iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
* other to be removed.
* @adapter: board private structure
**/
static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */ /* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock); spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head, list) { list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
list_del(&fdir->list);
kfree(fdir);
adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
} }
}
spin_unlock_bh(&adapter->fdir_fltr_lock); spin_unlock_bh(&adapter->fdir_fltr_lock);
}
/**
* iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
* other to be removed.
* @adapter: board private structure
**/
static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
{
struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */ /* remove all advance RSS configuration */
spin_lock_bh(&adapter->adv_rss_lock); spin_lock_bh(&adapter->adv_rss_lock);
list_for_each_entry(rss, &adapter->adv_rss_list_head, list) list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
list) {
if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
list_del(&rss->list);
kfree(rss);
} else {
rss->state = IAVF_ADV_RSS_DEL_REQUEST; rss->state = IAVF_ADV_RSS_DEL_REQUEST;
}
}
spin_unlock_bh(&adapter->adv_rss_lock); spin_unlock_bh(&adapter->adv_rss_lock);
}
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
*
* Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
netif_carrier_off(netdev);
netif_tx_disable(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
iavf_irq_disable(adapter);
iavf_clear_mac_vlan_filters(adapter);
iavf_clear_cloud_filters(adapter);
iavf_clear_fdir_filters(adapter);
iavf_clear_adv_rss_conf(adapter);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */ /* cancel any current operation */
...@@ -1338,10 +1410,15 @@ void iavf_down(struct iavf_adapter *adapter) ...@@ -1338,10 +1410,15 @@ void iavf_down(struct iavf_adapter *adapter)
* here for this to complete. The watchdog is still running * here for this to complete. The watchdog is still running
* and it will take care of this. * and it will take care of this.
*/ */
adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; if (!list_empty(&adapter->mac_filter_list))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
if (!list_empty(&adapter->vlan_filter_list))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
if (!list_empty(&adapter->cloud_filter_list))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
if (!list_empty(&adapter->fdir_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
if (!list_empty(&adapter->adv_rss_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
} }
...@@ -4173,6 +4250,7 @@ static int iavf_open(struct net_device *netdev) ...@@ -4173,6 +4250,7 @@ static int iavf_open(struct net_device *netdev)
static int iavf_close(struct net_device *netdev) static int iavf_close(struct net_device *netdev)
{ {
struct iavf_adapter *adapter = netdev_priv(netdev); struct iavf_adapter *adapter = netdev_priv(netdev);
u64 aq_to_restore;
int status; int status;
mutex_lock(&adapter->crit_lock); mutex_lock(&adapter->crit_lock);
...@@ -4185,6 +4263,29 @@ static int iavf_close(struct net_device *netdev) ...@@ -4185,6 +4263,29 @@ static int iavf_close(struct net_device *netdev)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter)) if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
* IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
* deadlock with adminq_task() until iavf_close timeouts. We must send
* IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
* disable queues possible for vf. Give only necessary flags to
* iavf_down and save other to set them right before iavf_close()
* returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
* iavf will be in DOWN state.
*/
aq_to_restore = adapter->aq_required;
adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
/* Remove flags which we do not want to send after close or we want to
* send before disable queues.
*/
aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
IAVF_FLAG_AQ_ENABLE_QUEUES |
IAVF_FLAG_AQ_CONFIGURE_QUEUES |
IAVF_FLAG_AQ_ADD_VLAN_FILTER |
IAVF_FLAG_AQ_ADD_MAC_FILTER |
IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
IAVF_FLAG_AQ_ADD_FDIR_FILTER |
IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter); iavf_down(adapter);
iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_change_state(adapter, __IAVF_DOWN_PENDING);
...@@ -4208,6 +4309,10 @@ static int iavf_close(struct net_device *netdev) ...@@ -4208,6 +4309,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500)); msecs_to_jiffies(500));
if (!status) if (!status)
netdev_warn(netdev, "Device resources not yet released\n"); netdev_warn(netdev, "Device resources not yet released\n");
mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
mutex_unlock(&adapter->crit_lock);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment