Commit eae93fe4 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-09-28

This series contains updates to i40e, i40evf and igb to resolve issues
seen and reported by Red Hat.

Kiran moves i40e_get_head() in preparation for the refactor of the Tx
timeout logic, so that it can be used in other areas of the driver.
He then refactors the Tx timeout logic so that, the first time the
driver detects a hang, it issues a write-back request to the hardware
via a software interrupt; previously the driver was too aggressive in
resetting a hung queue.
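
The reworked detection, roughly: when a check finds descriptors still
pending but no new completions since the last pass, the driver first asks
the hardware to write back finished descriptors and only escalates if the
queue stays stuck.  A minimal sketch of that idea, reusing the
tx_done_old bookkeeping from the old hang check and the
i40e_get_tx_pending()/i40e_force_wb() helpers this series makes
non-static (the function name and placement are illustrative, not the
driver's actual symbols):

	/* Hedged sketch: on the first sign of a stall, request a descriptor
	 * write-back via a software interrupt instead of scheduling a reset.
	 */
	static void i40e_detect_hung_queue_sketch(struct i40e_ring *tx_ring)
	{
		u32 tx_done = tx_ring->stats.packets;
		u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
		u32 tx_pending = i40e_get_tx_pending(tx_ring);

		if ((tx_done_old == tx_done) && tx_pending) {
			/* no progress, but work is still queued: kick a
			 * write-back and re-check on the next pass
			 */
			i40e_force_wb(tx_ring->vsi, tx_ring->q_vector);
			return;
		}

		/* progress was made; remember the new completion count */
		tx_ring->tx_stats.tx_done_old = tx_done;
	}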

Shannon adds the GRE protocol to the transmit checksum encoding.
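
The change itself is small: the tunnel-type switch in
i40e_tx_enable_csum() gains a GRE case alongside the existing UDP (VXLAN)
one, condensed here from the hunk further down (the variable driving the
switch is abbreviated in this sketch):

	switch (l4_proto) {	/* abbreviated name for the outer protocol */
	case IPPROTO_UDP:
		l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;	/* e.g. VXLAN */
		*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
		break;
	case IPPROTO_GRE:
		/* new in this series: encode GRE tunneling in the Tx
		 * context descriptor so checksum offload is set up
		 */
		l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
		break;
	default:
		return;
	}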

Anjali fixes an issue where writeback was forced too often, which kept
us from benefiting from NAPI.  Force writeback is now disabled in the
clean routine for X710 and XL710 adapters.  X722 adapters do not enable
an interrupt to force a writeback and benefit from WB_ON_ITR, so force
writeback is left enabled for them.  She also fixes a possible deadlock
where sync_vsi_filters() can be called either directly under RTNL or
through the timer subtask without RTNL; the flow now checks whether we
already hold RTNL before trying to grab it.
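
The deadlock fix threads a grab_rtnl flag through i40e_sync_vsi_filters()
so that each call site states whether RTNL is already held.  A sketch of
the locking pattern only, with the actual filter programming elided (the
body here is illustrative; only the new signature comes from this series):

	int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
	{
		int ret = 0;

		if (grab_rtnl)
			rtnl_lock();	/* caller does not already hold RTNL */

		/* ... program the VSI's MAC/VLAN filter list here ... */

		if (grab_rtnl)
			rtnl_unlock();
		return ret;
	}

In the hunks below, the debugfs command paths pass true while the VF
filter-programming paths pass false.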

Stefan Assmann provides a fix for igb where SR-IOV was not getting
enabled properly and we ran into a NULL pointer dereference when the
max_vfs module parameter is specified.  This is prevented by setting the
IGB_FLAG_HAS_MSIX bit before calling igb_probe_vfs(); a condensed view
of the change follows.
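
The ordering change in igb_sw_init(), condensed from the final hunk
below:

	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	igb_probe_vfs(adapter);

With the flag set up front, igb_probe_vfs() no longer observes a
half-initialized interrupt configuration when max_vfs is given.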

v2: added the "i40e: Fix for recursive RTNL lock during PROMISC change"
    patch to the series, as it resolves another issue seen and reported
    by Red Hat.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 34c2d9fb cbfe360a
@@ -243,7 +243,6 @@ struct i40e_pf {
 	struct pci_dev *pdev;
 	struct i40e_hw hw;
 	unsigned long state;
-	unsigned long link_check_timeout;
 	struct msix_entry *msix_entries;
 	bool fc_autoneg_status;
@@ -667,7 +666,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 			bool is_vf, bool is_netdev);
 void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
 		     bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
...
@@ -1146,7 +1146,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		}
 		f = i40e_add_filter(vsi, ma, vlan, false, false);
-		ret = i40e_sync_vsi_filters(vsi);
+		ret = i40e_sync_vsi_filters(vsi, true);
 		if (f && !ret)
 			dev_info(&pf->pdev->dev,
 				 "add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1183,7 +1183,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		}
 		i40e_del_filter(vsi, ma, vlan, false, false);
-		ret = i40e_sync_vsi_filters(vsi);
+		ret = i40e_sync_vsi_filters(vsi, true);
 		if (!ret)
 			dev_info(&pf->pdev->dev,
 				 "del macaddr: %pM vlan=%d removed from VSI %d\n",
...
[One file's diff was collapsed in the original view and is not shown.]
@@ -600,20 +600,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 	}
 }
 
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
@@ -621,7 +607,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
  **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
 	u32 head, tail;
 
@@ -635,50 +621,6 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 	return 0;
 }
 
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
-	u32 tx_done = tx_ring->stats.packets;
-	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
-	u32 tx_pending = i40e_get_tx_pending(tx_ring);
-	struct i40e_pf *pf = tx_ring->vsi->back;
-	bool ret = false;
-
-	clear_check_for_tx_hang(tx_ring);
-
-	/* Check for a hung queue, but be thorough. This verifies
-	 * that a transmit has been completed since the previous
-	 * check AND there is at least one packet pending. The
-	 * ARMED bit is set to indicate a potential hang. The
-	 * bit is cleared if a pause frame is received to remove
-	 * false hang detection due to PFC or 802.3x frames. By
-	 * requiring this to fail twice we avoid races with
-	 * PFC clearing the ARMED bit and conditions where we
-	 * run the check_tx_hang logic with a transmit completion
-	 * pending but without time to complete it yet.
-	 */
-	if ((tx_done_old == tx_done) && tx_pending) {
-		/* make sure it is true for two checks in a row */
-		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
-				       &tx_ring->state);
-	} else if (tx_done_old == tx_done &&
-		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
-		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
-			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
-				 tx_pending, tx_ring->queue_index);
-		pf->tx_sluggish_count++;
-	} else {
-		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_done;
-		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
-	}
-
-	return ret;
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -784,42 +726,21 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
-	/* check to see if there are any non-cache aligned descriptors
-	 * waiting to be written back, and kick the hardware to force
-	 * them to be written back in case of napi polling
-	 */
-	if (budget &&
-	    !((i & WB_STRIDE) == WB_STRIDE) &&
-	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-		tx_ring->arm_wb = true;
-	else
-		tx_ring->arm_wb = false;
-
-	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
-		/* schedule immediate reset if we believe we hung */
-		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
-			 "  VSI <%d>\n"
-			 "  Tx Queue <%d>\n"
-			 "  next_to_use <%x>\n"
-			 "  next_to_clean <%x>\n",
-			 tx_ring->vsi->seid,
-			 tx_ring->queue_index,
-			 tx_ring->next_to_use, i);
-
-		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-		dev_info(tx_ring->dev,
-			 "tx hang detected on queue %d, reset requested\n",
-			 tx_ring->queue_index);
-
-		/* do not fire the reset immediately, wait for the stack to
-		 * decide we are truly stuck, also prevents every queue from
-		 * simultaneously requesting a reset
-		 */
-
-		/* the adapter is about to reset, no point in enabling polling */
-		budget = 1;
+	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		unsigned int j = 0;
+
+		/* check to see if there are < 4 descriptors
+		 * waiting to be written back, then kick the hardware to force
+		 * them to be written back in case we stay in NAPI.
+		 * In this mode on X722 we do not enable Interrupt.
+		 */
+		j = i40e_get_tx_pending(tx_ring);
+
+		if (budget &&
+		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+			tx_ring->arm_wb = true;
 	}
 
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -851,7 +772,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
  * @q_vector: the vector on which to force writeback
  *
  **/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
 	u16 flags = q_vector->tx.ring[0].flags;
@@ -2324,6 +2245,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 		l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 		*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 		break;
+	case IPPROTO_GRE:
+		l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+		break;
 	default:
 		return;
 	}
@@ -2581,6 +2505,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	u32 td_tag = 0;
 	dma_addr_t dma;
 	u16 gso_segs;
+	u16 desc_count = 0;
+	bool tail_bump = true;
+	bool do_rs = false;
 
 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2621,6 +2548,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -2640,6 +2569,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -2654,51 +2585,80 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_bi = &tx_ring->tx_bi[i];
 	}
 
-	/* Place RS bit on last descriptor of any packet that spans across the
-	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
-	 */
-	if (((i & WB_STRIDE) != WB_STRIDE) &&
-	    (first <= &tx_ring->tx_bi[i]) &&
-	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
-					 I40E_TXD_QW1_CMD_SHIFT);
-	} else {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TXD_CMD <<
-					 I40E_TXD_QW1_CMD_SHIFT);
-	}
-
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-
-	/* set next_to_watch value indicating a packet is present */
-	first->next_to_watch = tx_desc;
-
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
-
-	tx_ring->next_to_use = i;
-
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-	/* notify HW of packet */
-	if (!skb->xmit_more ||
-	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						   tx_ring->queue_index)))
-		writel(i, tx_ring->tail);
-	else
-		prefetchw(tx_desc + 1);
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+						 tx_ring->queue_index),
+			     first->bytecount);
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	/* Algorithm to optimize tail and RS bit setting:
+	 * if xmit_more is supported
+	 *	if xmit_more is true
+	 *		do not update tail and do not mark RS bit.
+	 *	if xmit_more is false and last xmit_more was false
+	 *		if every packet spanned less than 4 desc
+	 *			then set RS bit on 4th packet and update tail
+	 *			on every packet
+	 *		else
+	 *			update tail and set RS bit on every packet.
+	 *	if xmit_more is false and last_xmit_more was true
+	 *		update tail and set RS bit.
+	 *
+	 * Optimization: wmb to be issued only in case of tail update.
+	 * Also optimize the Descriptor WB path for RS bit with the same
+	 * algorithm.
+	 *
+	 * Note: If there are less than 4 packets
+	 * pending and interrupts were disabled the service task will
+	 * trigger a force WB.
+	 */
+	if (skb->xmit_more &&
+	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						    tx_ring->queue_index))) {
+		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		tail_bump = false;
+	} else if (!skb->xmit_more &&
+		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						       tx_ring->queue_index)) &&
+		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+		   (tx_ring->packet_stride < WB_STRIDE) &&
+		   (desc_count < WB_STRIDE)) {
+		tx_ring->packet_stride++;
+	} else {
+		tx_ring->packet_stride = 0;
+		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		do_rs = true;
+	}
+	if (do_rs)
+		tx_ring->packet_stride = 0;
+
+	tx_desc->cmd_type_offset_bsz =
+			build_ctob(td_cmd, td_offset, size, td_tag) |
+			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+						  I40E_TX_DESC_CMD_EOP) <<
+						  I40E_TXD_QW1_CMD_SHIFT);
+
+	/* notify HW of packet */
+	if (!tail_bump)
+		prefetchw(tx_desc + 1);
+
+	if (tail_bump) {
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, tx_ring->tail);
+	}
 
 	return;
...
@@ -199,8 +199,6 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_TX_DETECT_HANG,
-	__I40E_HANG_CHECK_ARMED,
 	__I40E_RX_PS_ENABLED,
 	__I40E_RX_16BYTE_DESC_ENABLED,
 };
@@ -211,12 +209,6 @@ enum i40e_ring_state_t {
 	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
 #define clear_ring_ps_enabled(ring) \
 	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
-	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
 #define ring_is_16byte_desc_enabled(ring) \
 	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
 #define set_ring_16byte_desc_enabled(ring) \
@@ -264,10 +256,12 @@ struct i40e_ring {
 	bool ring_active;	/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
+	u8 packet_stride;
 
 	u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
 #define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
 	/* stats structs */
 	struct i40e_queue_stats	stats;
@@ -326,4 +320,20 @@ int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
 int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 			       struct i40e_ring *tx_ring, u32 *flags);
 #endif
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40e_get_tx_pending(struct i40e_ring *ring);
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+	return le32_to_cpu(*(volatile __le32 *)head);
+}
 #endif /* _I40E_TXRX_H_ */
@@ -561,7 +561,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 	}
 
 	/* program mac filter */
-	ret = i40e_sync_vsi_filters(vsi);
+	ret = i40e_sync_vsi_filters(vsi, false);
 	if (ret)
 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
@@ -1605,7 +1605,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	}
 
 	/* program the updated filter list */
-	if (i40e_sync_vsi_filters(vsi))
+	if (i40e_sync_vsi_filters(vsi, false))
 		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
 
 error_param:
@@ -1656,7 +1656,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 				   I40E_VLAN_ANY, true, false);
 
 	/* program the updated filter list */
-	if (i40e_sync_vsi_filters(vsi))
+	if (i40e_sync_vsi_filters(vsi, false))
 		dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
 
 error_param:
@@ -2062,7 +2062,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
 
 	/* program mac filter */
-	if (i40e_sync_vsi_filters(vsi)) {
+	if (i40e_sync_vsi_filters(vsi, false)) {
 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 		ret = -EIO;
 		goto error_param;
...
@@ -140,65 +140,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
 	return le32_to_cpu(*(volatile __le32 *)head);
 }
 
-/**
- * i40e_get_tx_pending - how many tx descriptors not processed
- * @tx_ring: the ring of descriptors
- *
- * Since there is no access to the ring head register
- * in XL710, we need to use our local copies
- **/
-static u32 i40e_get_tx_pending(struct i40e_ring *ring)
-{
-	u32 head, tail;
-
-	head = i40e_get_head(ring);
-	tail = readl(ring->tail);
-
-	if (head != tail)
-		return (head < tail) ?
-			tail - head : (tail + ring->count - head);
-
-	return 0;
-}
-
-/**
- * i40e_check_tx_hang - Is there a hang in the Tx queue
- * @tx_ring: the ring of descriptors
- **/
-static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
-{
-	u32 tx_done = tx_ring->stats.packets;
-	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
-	u32 tx_pending = i40e_get_tx_pending(tx_ring);
-	bool ret = false;
-
-	clear_check_for_tx_hang(tx_ring);
-
-	/* Check for a hung queue, but be thorough. This verifies
-	 * that a transmit has been completed since the previous
-	 * check AND there is at least one packet pending. The
-	 * ARMED bit is set to indicate a potential hang. The
-	 * bit is cleared if a pause frame is received to remove
-	 * false hang detection due to PFC or 802.3x frames. By
-	 * requiring this to fail twice we avoid races with
-	 * PFC clearing the ARMED bit and conditions where we
-	 * run the check_tx_hang logic with a transmit completion
-	 * pending but without time to complete it yet.
-	 */
-	if ((tx_done_old == tx_done) && tx_pending) {
-		/* make sure it is true for two checks in a row */
-		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
-				       &tx_ring->state);
-	} else if (tx_done_old == tx_done &&
-		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
-		/* update completed stats and disarm the hang check */
-		tx_ring->tx_stats.tx_done_old = tx_done;
-		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
-	}
-
-	return ret;
-}
-
 #define WB_STRIDE 0x3
 
 /**
@@ -304,6 +245,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
+	/* check to see if there are any non-cache aligned descriptors
+	 * waiting to be written back, and kick the hardware to force
+	 * them to be written back in case of napi polling
+	 */
 	if (budget &&
 	    !((i & WB_STRIDE) == WB_STRIDE) &&
 	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
@@ -312,29 +257,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	else
 		tx_ring->arm_wb = false;
 
-	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
-		/* schedule immediate reset if we believe we hung */
-		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
-			 "  VSI <%d>\n"
-			 "  Tx Queue <%d>\n"
-			 "  next_to_use <%x>\n"
-			 "  next_to_clean <%x>\n",
-			 tx_ring->vsi->seid,
-			 tx_ring->queue_index,
-			 tx_ring->next_to_use, i);
-
-		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
-		dev_info(tx_ring->dev,
-			 "tx hang detected on queue %d, resetting adapter\n",
-			 tx_ring->queue_index);
-
-		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
-
-		/* the adapter is about to reset, no point in enabling stuff */
-		return true;
-	}
-
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 						      tx_ring->queue_index),
 				  total_packets, total_bytes);
@@ -355,16 +277,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 		}
 	}
 
-	return budget > 0;
+	return !!budget;
 }
 
 /**
- * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors
+ * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors
  * @vsi: the VSI we care about
  * @q_vector: the vector on which to force writeback
  *
 **/
-static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
 	u16 flags = q_vector->tx.ring[0].flags;
@@ -1385,7 +1307,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete) {
 		if (arm_wb)
-			i40e_force_wb(vsi, q_vector);
+			i40evf_force_wb(vsi, q_vector);
 		return budget;
 	}
...
@@ -198,8 +198,6 @@ struct i40e_rx_queue_stats {
 enum i40e_ring_state_t {
 	__I40E_TX_FDIR_INIT_DONE,
 	__I40E_TX_XPS_INIT_DONE,
-	__I40E_TX_DETECT_HANG,
-	__I40E_HANG_CHECK_ARMED,
 	__I40E_RX_PS_ENABLED,
 	__I40E_RX_16BYTE_DESC_ENABLED,
 };
@@ -210,12 +208,6 @@ enum i40e_ring_state_t {
 	set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
 #define clear_ring_ps_enabled(ring) \
 	clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define check_for_tx_hang(ring) \
-	test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
 #define ring_is_16byte_desc_enabled(ring) \
 	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
 #define set_ring_16byte_desc_enabled(ring) \
...
@@ -2986,6 +2986,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	}
 #endif /* CONFIG_PCI_IOV */
 
+	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
+	adapter->flags |= IGB_FLAG_HAS_MSIX;
+
 	igb_probe_vfs(adapter);
 
 	igb_init_queue_configuration(adapter);
...