Commit 95aef7ce authored by David S. Miller's avatar David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-05-05

This series contains updates to i40e and i40evf.

The theme behind this series is code reduction, yeah!  Jesse provides
most of the changes starting with a refactor of the interpretation of
a tunnel which lets us start using the hardware's parsing.  Removed
the packet split receive routine and ancillary code in preparation
for the Rx-refactor.  The refactor of the receive routine,
aligns the receive routine with the one in ixgbe which was highly
optimized.  The hardware supports a 16 byte descriptor for receive,
but the driver was never using it in production.  There was no performance
benefit to the real driver of 16 byte descriptors, so drop a whole lot
of complexity while getting rid of the code.  Fixed a bug where while
changing the number of descriptors using ethtool, the driver did not
test the limits of the system memory before permanently assuming it
would be able to get receive buffer memory.

Mitch fixes a memory leak of one page each time the driver is opened, by
allocating the correct number of receive buffers and by not fiddling with
next_to_use in the VF driver.

Arnd Bergmann fixed an indentation issue by adding the appropriate
curly braces in i40e_vc_config_promiscuous_mode_msg().

Julia Lawall fixed an issue found by Coccinelle, where the i40e_client_ops
structure can be const since it is never modified.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents b3b4663c 3949c4ac
...@@ -1863,7 +1863,7 @@ static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev, ...@@ -1863,7 +1863,7 @@ static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
} }
/* client interface functions */ /* client interface functions */
static struct i40e_client_ops i40e_ops = { static const struct i40e_client_ops i40e_ops = {
.open = i40iw_open, .open = i40iw_open,
.close = i40iw_close, .close = i40iw_close,
.l2_param_change = i40iw_l2param_change, .l2_param_change = i40iw_l2param_change,
......
...@@ -101,7 +101,6 @@ ...@@ -101,7 +101,6 @@
#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
#define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_FD_ATR BIT(2)
#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
#define I40E_PRIV_FLAGS_PS BIT(4)
#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) #define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5)
#define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_SHIFT 0
...@@ -123,10 +122,7 @@ ...@@ -123,10 +122,7 @@
#define XSTRINGIFY(bar) STRINGIFY(bar) #define XSTRINGIFY(bar) STRINGIFY(bar)
#define I40E_RX_DESC(R, i) \ #define I40E_RX_DESC(R, i) \
((ring_is_16byte_desc_enabled(R)) \ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
? (union i40e_32byte_rx_desc *) \
(&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
: (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
#define I40E_TX_DESC(R, i) \ #define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i])) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \ #define I40E_TX_CTXTDESC(R, i) \
...@@ -320,8 +316,6 @@ struct i40e_pf { ...@@ -320,8 +316,6 @@ struct i40e_pf {
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) #define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
...@@ -330,7 +324,6 @@ struct i40e_pf { ...@@ -330,7 +324,6 @@ struct i40e_pf {
#ifdef I40E_FCOE #ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */ #endif /* I40E_FCOE */
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) #define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
...@@ -534,9 +527,7 @@ struct i40e_vsi { ...@@ -534,9 +527,7 @@ struct i40e_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */ u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame; u16 max_frame;
u16 rx_hdr_len;
u16 rx_buf_len; u16 rx_buf_len;
u8 dtype;
/* List of q_vectors allocated to this VSI */ /* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors; struct i40e_q_vector **q_vectors;
......
...@@ -217,7 +217,7 @@ struct i40e_client { ...@@ -217,7 +217,7 @@ struct i40e_client {
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
enum i40e_client_type type; enum i40e_client_type type;
struct i40e_client_ops *ops; /* client ops provided by the client */ const struct i40e_client_ops *ops; /* client ops provided by the client */
}; };
static inline bool i40e_client_is_registered(struct i40e_client *client) static inline bool i40e_client_is_registered(struct i40e_client *client)
......
...@@ -268,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ...@@ -268,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->queue_index, rx_ring->queue_index,
rx_ring->reg_idx); rx_ring->reg_idx);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", " rx_rings[%i]: rx_buf_len = %d\n",
i, rx_ring->rx_hdr_len, i, rx_ring->rx_buf_len);
rx_ring->rx_buf_len,
rx_ring->dtype);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i, ring_is_ps_enabled(rx_ring), i,
rx_ring->next_to_use, rx_ring->next_to_use,
rx_ring->next_to_clean, rx_ring->next_to_clean,
rx_ring->ring_active); rx_ring->ring_active);
...@@ -325,9 +323,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ...@@ -325,9 +323,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
i, tx_ring->state, i, tx_ring->state,
tx_ring->queue_index, tx_ring->queue_index,
tx_ring->reg_idx); tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: dtype = %d\n",
i, tx_ring->dtype);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i, i,
...@@ -365,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ...@@ -365,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" work_limit = %d\n", " work_limit = %d\n",
vsi->work_limit); vsi->work_limit);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", " max_frame = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); vsi->max_frame, vsi->rx_buf_len, 0);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n", " num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector); vsi->num_q_vectors, vsi->base_vector);
...@@ -591,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, ...@@ -591,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
" d[%03x] = 0x%016llx 0x%016llx\n", " d[%03x] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr, i, txd->buffer_addr,
txd->cmd_type_offset_bsz); txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
" d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
rxd->read.hdr_addr);
} else { } else {
rxd = I40E_RX_DESC(ring, i); rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
...@@ -619,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, ...@@ -619,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n, vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz); txd->buffer_addr, txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
rxd->read.pkt_addr, rxd->read.hdr_addr);
} else { } else {
rxd = I40E_RX_DESC(ring, desc_n); rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
......
...@@ -235,7 +235,6 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { ...@@ -235,7 +235,6 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"LinkPolling", "LinkPolling",
"flow-director-atr", "flow-director-atr",
"veb-stats", "veb-stats",
"packet-split",
"hw-atr-eviction", "hw-atr-eviction",
}; };
...@@ -1275,6 +1274,13 @@ static int i40e_set_ringparam(struct net_device *netdev, ...@@ -1275,6 +1274,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
} }
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
u32 __iomem faketail = 0;
struct i40e_ring *ring;
u16 unused;
/* clone ring and setup updated count */ /* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count; rx_rings[i].count = new_rx_count;
...@@ -1283,12 +1289,22 @@ static int i40e_set_ringparam(struct net_device *netdev, ...@@ -1283,12 +1289,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/ */
rx_rings[i].desc = NULL; rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL; rx_rings[i].rx_bi = NULL;
rx_rings[i].tail = (u8 __iomem *)&faketail;
err = i40e_setup_rx_descriptors(&rx_rings[i]); err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
*/
ring = &rx_rings[i];
unused = I40E_DESC_UNUSED(ring);
err = i40e_alloc_rx_buffers(ring, unused);
rx_unwind:
if (err) { if (err) {
while (i) { do {
i--;
i40e_free_rx_resources(&rx_rings[i]); i40e_free_rx_resources(&rx_rings[i]);
} } while (i--);
kfree(rx_rings); kfree(rx_rings);
rx_rings = NULL; rx_rings = NULL;
...@@ -1314,6 +1330,17 @@ static int i40e_set_ringparam(struct net_device *netdev, ...@@ -1314,6 +1330,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) { if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_rx_resources(vsi->rx_rings[i]); i40e_free_rx_resources(vsi->rx_rings[i]);
/* get the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
/* this is to fake out the allocation routine
* into thinking it has to realloc everything
* but the recycling logic will let us re-use
* the buffers allocated above
*/
rx_rings[i].next_to_use = 0;
rx_rings[i].next_to_clean = 0;
rx_rings[i].next_to_alloc = 0;
/* do a struct copy */
*vsi->rx_rings[i] = rx_rings[i]; *vsi->rx_rings[i] = rx_rings[i];
} }
kfree(rx_rings); kfree(rx_rings);
...@@ -2829,8 +2856,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev) ...@@ -2829,8 +2856,6 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
I40E_PRIV_FLAGS_FD_ATR : 0; I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0; I40E_PRIV_FLAGS_VEB_STATS : 0;
ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
I40E_PRIV_FLAGS_PS : 0;
ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
...@@ -2851,23 +2876,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) ...@@ -2851,23 +2876,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
/* NOTE: MFP is not settable */ /* NOTE: MFP is not settable */
/* allow the user to control the method of receive
* buffer DMA, whether the packet is split at header
* boundaries into two separate buffers. In some cases
* one routine or the other will perform better.
*/
if ((flags & I40E_PRIV_FLAGS_PS) &&
!(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
pf->flags |= I40E_FLAG_RX_PS_ENABLED;
pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
reset_required = true;
} else if (!(flags & I40E_PRIV_FLAGS_PS) &&
(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
reset_required = true;
}
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
else else
......
...@@ -2855,34 +2855,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ...@@ -2855,34 +2855,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
memset(&rx_ctx, 0, sizeof(rx_ctx)); memset(&rx_ctx, 0, sizeof(rx_ctx));
ring->rx_buf_len = vsi->rx_buf_len; ring->rx_buf_len = vsi->rx_buf_len;
ring->rx_hdr_len = vsi->rx_hdr_len;
rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
rx_ctx.base = (ring->dma / 128); rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count; rx_ctx.qlen = ring->count;
if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { /* use 32 byte descriptors */
set_ring_16byte_desc_enabled(ring);
rx_ctx.dsize = 0;
} else {
rx_ctx.dsize = 1; rx_ctx.dsize = 1;
}
rx_ctx.dtype = vsi->dtype; /* descriptor type is always zero
if (vsi->dtype) { * rx_ctx.dtype = 0;
set_ring_ps_enabled(ring); */
rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
I40E_RX_SPLIT_IP |
I40E_RX_SPLIT_TCP_UDP |
I40E_RX_SPLIT_SCTP;
} else {
rx_ctx.hsplit_0 = 0; rx_ctx.hsplit_0 = 0;
}
rx_ctx.rxmax = min_t(u16, vsi->max_frame, rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
(chain_len * ring->rx_buf_len));
if (hw->revision_id == 0) if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0; rx_ctx.lrxqthresh = 0;
else else
...@@ -2919,12 +2906,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ...@@ -2919,12 +2906,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail); writel(0, ring->tail);
if (ring_is_ps_enabled(ring)) { i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
i40e_alloc_rx_headers(ring);
i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
} else {
i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
}
return 0; return 0;
} }
...@@ -2963,40 +2945,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) ...@@ -2963,40 +2945,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
else else
vsi->max_frame = I40E_RXBUFFER_2048; vsi->max_frame = I40E_RXBUFFER_2048;
/* figure out correct receive buffer length */
switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
I40E_FLAG_RX_PS_ENABLED)) {
case I40E_FLAG_RX_1BUF_ENABLED:
vsi->rx_hdr_len = 0;
vsi->rx_buf_len = vsi->max_frame;
vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
break;
case I40E_FLAG_RX_PS_ENABLED:
vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
vsi->rx_buf_len = I40E_RXBUFFER_2048; vsi->rx_buf_len = I40E_RXBUFFER_2048;
vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
break;
default:
vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
vsi->rx_buf_len = I40E_RXBUFFER_2048;
vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
break;
}
#ifdef I40E_FCOE #ifdef I40E_FCOE
/* setup rx buffer for FCoE */ /* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) && if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
vsi->rx_hdr_len = 0;
vsi->rx_buf_len = I40E_RXBUFFER_3072; vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072; vsi->max_frame = I40E_RXBUFFER_3072;
vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
} }
#endif /* I40E_FCOE */ #endif /* I40E_FCOE */
/* round up for the chip's needs */ /* round up for the chip's needs */
vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
...@@ -7512,10 +7472,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ...@@ -7512,10 +7472,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
rx_ring->count = vsi->num_desc; rx_ring->count = vsi->num_desc;
rx_ring->size = 0; rx_ring->size = 0;
rx_ring->dcb_tc = 0; rx_ring->dcb_tc = 0;
if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
rx_ring->rx_itr_setting = pf->rx_itr_default; rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring; vsi->rx_rings[i] = rx_ring;
} }
...@@ -8460,11 +8416,6 @@ static int i40e_sw_init(struct i40e_pf *pf) ...@@ -8460,11 +8416,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MSI_ENABLED | I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED; I40E_FLAG_MSIX_ENABLED;
if (iommu_present(&pci_bus_type))
pf->flags |= I40E_FLAG_RX_PS_ENABLED;
else
pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
/* Set default ITR */ /* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
...@@ -10696,11 +10647,9 @@ static void i40e_print_features(struct i40e_pf *pf) ...@@ -10696,11 +10647,9 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif #endif
i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis, pf->hw.func_caps.num_vsis,
pf->vsi[pf->lan_vsi]->num_queue_pairs, pf->vsi[pf->lan_vsi]->num_queue_pairs);
pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
if (pf->flags & I40E_FLAG_RSS_ENABLED) if (pf->flags & I40E_FLAG_RSS_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " RSS"); i += snprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
......
This diff is collapsed.
...@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { ...@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096 #define I40E_RXBUFFER_4096 4096
...@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { ...@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation * this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K. * we could have is 1K.
* i.e. RXBUFFER_512 --> size-1024 slab * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
* i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/ */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
* @stat_err_bits: value to mask
*
* This function does some fast chicanery in order to return the
* value of the mask which is really only used for boolean tests.
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
const u64 stat_err_bits)
{
return !!(rx_desc->wb.qword1.status_error_len &
cpu_to_le64(stat_err_bits));
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
...@@ -142,8 +161,6 @@ enum i40e_dyn_idx_t { ...@@ -142,8 +161,6 @@ enum i40e_dyn_idx_t {
prefetch((n)); \ prefetch((n)); \
} while (0) } while (0)
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MAX_BUFFER_TXD 8 #define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17 #define I40E_MIN_TX_LEN 17
...@@ -213,10 +230,8 @@ struct i40e_tx_buffer { ...@@ -213,10 +230,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer { struct i40e_rx_buffer {
struct sk_buff *skb; struct sk_buff *skb;
void *hdr_buf;
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
dma_addr_t page_dma;
unsigned int page_offset; unsigned int page_offset;
}; };
...@@ -245,22 +260,18 @@ struct i40e_rx_queue_stats { ...@@ -245,22 +260,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t { enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE, __I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE, __I40E_TX_XPS_INIT_DONE,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
}; };
#define ring_is_ps_enabled(ring) \ /* some useful defines for virtchannel interface, which
test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) * is the only remaining user of header split
#define set_ring_ps_enabled(ring) \ */
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define I40E_RX_DTYPE_NO_SPLIT 0
#define clear_ring_ps_enabled(ring) \ #define I40E_RX_DTYPE_HEADER_SPLIT 1
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define ring_is_16byte_desc_enabled(ring) \ #define I40E_RX_SPLIT_L2 0x1
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define I40E_RX_SPLIT_IP 0x2
#define set_ring_16byte_desc_enabled(ring) \ #define I40E_RX_SPLIT_TCP_UDP 0x4
set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define I40E_RX_SPLIT_SCTP 0x8
#define clear_ring_16byte_desc_enabled(ring) \
clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
/* struct that defines a descriptor ring, associated with a VSI */ /* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring { struct i40e_ring {
...@@ -287,16 +298,7 @@ struct i40e_ring { ...@@ -287,16 +298,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */ u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */ u16 reg_idx; /* HW register index of the ring */
u16 rx_hdr_len;
u16 rx_buf_len; u16 rx_buf_len;
u8 dtype;
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */ /* used in interrupt processing */
u16 next_to_use; u16 next_to_use;
...@@ -330,6 +332,7 @@ struct i40e_ring { ...@@ -330,6 +332,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */ struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
enum i40e_latency_range { enum i40e_latency_range {
...@@ -353,9 +356,7 @@ struct i40e_ring_container { ...@@ -353,9 +356,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \ #define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next) for (pos = (head).ring; pos != NULL; pos = pos->next)
bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
void i40e_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
......
...@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, ...@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
} }
rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
/* set splitalways mode 10b */ /* set split mode 10b */
rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
} }
...@@ -1544,7 +1544,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1544,7 +1544,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
list_for_each_entry(f, &vsi->mac_filter_list, list) { list_for_each_entry(f, &vsi->mac_filter_list, list) {
aq_ret = 0; aq_ret = 0;
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
aq_ret = aq_ret =
i40e_aq_set_vsi_uc_promisc_on_vlan(hw, i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
vsi->seid, vsi->seid,
...@@ -1552,6 +1552,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1552,6 +1552,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
f->vlan, f->vlan,
NULL); NULL);
aq_err = pf->hw.aq.asq_last_status; aq_err = pf->hw.aq.asq_last_status;
}
if (aq_ret) if (aq_ret)
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
......
...@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { ...@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096 #define I40E_RXBUFFER_4096 4096
...@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { ...@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation * this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K. * we could have is 1K.
* i.e. RXBUFFER_512 --> size-1024 slab * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
* i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/ */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
* @stat_err_bits: value to mask
*
* This function does some fast chicanery in order to return the
* value of the mask which is really only used for boolean tests.
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
const u64 stat_err_bits)
{
return !!(rx_desc->wb.qword1.status_error_len &
cpu_to_le64(stat_err_bits));
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
...@@ -142,8 +161,6 @@ enum i40e_dyn_idx_t { ...@@ -142,8 +161,6 @@ enum i40e_dyn_idx_t {
prefetch((n)); \ prefetch((n)); \
} while (0) } while (0)
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MAX_BUFFER_TXD 8 #define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17 #define I40E_MIN_TX_LEN 17
...@@ -212,10 +229,8 @@ struct i40e_tx_buffer { ...@@ -212,10 +229,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer { struct i40e_rx_buffer {
struct sk_buff *skb; struct sk_buff *skb;
void *hdr_buf;
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
dma_addr_t page_dma;
unsigned int page_offset; unsigned int page_offset;
}; };
...@@ -244,22 +259,18 @@ struct i40e_rx_queue_stats { ...@@ -244,22 +259,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t { enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE, __I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE, __I40E_TX_XPS_INIT_DONE,
__I40E_RX_PS_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
}; };
#define ring_is_ps_enabled(ring) \ /* some useful defines for virtchannel interface, which
test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) * is the only remaining user of header split
#define set_ring_ps_enabled(ring) \ */
set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define I40E_RX_DTYPE_NO_SPLIT 0
#define clear_ring_ps_enabled(ring) \ #define I40E_RX_DTYPE_HEADER_SPLIT 1
clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) #define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define ring_is_16byte_desc_enabled(ring) \ #define I40E_RX_SPLIT_L2 0x1
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define I40E_RX_SPLIT_IP 0x2
#define set_ring_16byte_desc_enabled(ring) \ #define I40E_RX_SPLIT_TCP_UDP 0x4
set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) #define I40E_RX_SPLIT_SCTP 0x8
#define clear_ring_16byte_desc_enabled(ring) \
clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
/* struct that defines a descriptor ring, associated with a VSI */ /* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring { struct i40e_ring {
...@@ -278,16 +289,7 @@ struct i40e_ring { ...@@ -278,16 +289,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */ u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */ u16 reg_idx; /* HW register index of the ring */
u16 rx_hdr_len;
u16 rx_buf_len; u16 rx_buf_len;
u8 dtype;
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */ /* used in interrupt processing */
u16 next_to_use; u16 next_to_use;
...@@ -319,6 +321,7 @@ struct i40e_ring { ...@@ -319,6 +321,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */ struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
enum i40e_latency_range { enum i40e_latency_range {
...@@ -342,9 +345,7 @@ struct i40e_ring_container { ...@@ -342,9 +345,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \ #define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next) for (pos = (head).ring; pos != NULL; pos = pos->next)
bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
......
...@@ -80,9 +80,6 @@ struct i40e_vsi { ...@@ -80,9 +80,6 @@ struct i40e_vsi {
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 #define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */ /* Supported Rx Buffer Sizes */
#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
#define I40EVF_RXBUFFER_2048 2048 #define I40EVF_RXBUFFER_2048 2048
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096 #define I40EVF_MAX_AQ_BUF_SIZE 4096
...@@ -208,9 +205,6 @@ struct i40evf_adapter { ...@@ -208,9 +205,6 @@ struct i40evf_adapter {
u32 flags; u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
...@@ -295,7 +289,6 @@ struct i40evf_adapter { ...@@ -295,7 +289,6 @@ struct i40evf_adapter {
/* Ethtool Private Flags */ /* Ethtool Private Flags */
#define I40EVF_PRIV_FLAGS_PS BIT(0)
/* needed by i40evf_ethtool.c */ /* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[]; extern char i40evf_driver_name[];
......
...@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { ...@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \ #define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
"packet-split",
};
#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
/** /**
* i40evf_get_settings - Get Link Speed and Duplex settings * i40evf_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) ...@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{ {
if (sset == ETH_SS_STATS) if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev); return I40EVF_STATS_LEN(netdev);
else if (sset == ETH_SS_PRIV_FLAGS)
return I40EVF_PRIV_FLAGS_STR_LEN;
else else
return -EINVAL; return -EINVAL;
} }
...@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
} else if (sset == ETH_SS_PRIV_FLAGS) {
for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
memcpy(data, i40evf_priv_flags_strings[i],
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
} }
} }
...@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev, ...@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
} }
/** /**
...@@ -515,54 +500,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, ...@@ -515,54 +500,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
return i40evf_config_rss(adapter); return i40evf_config_rss(adapter);
} }
/**
* i40evf_get_priv_flags - report device private flags
* @dev: network interface device structure
*
* The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the i40e_priv_flags_strings
* array.
*
* Returns a u32 bitmap of flags.
**/
static u32 i40evf_get_priv_flags(struct net_device *dev)
{
struct i40evf_adapter *adapter = netdev_priv(dev);
u32 ret_flags = 0;
ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
I40EVF_PRIV_FLAGS_PS : 0;
return ret_flags;
}
/**
* i40evf_set_priv_flags - set private flags
* @dev: network interface device structure
* @flags: bit flags to be set
**/
static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
{
struct i40evf_adapter *adapter = netdev_priv(dev);
bool reset_required = false;
if ((flags & I40EVF_PRIV_FLAGS_PS) &&
!(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
reset_required = true;
} else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
reset_required = true;
}
/* if needed, issue reset to cause things to take effect */
if (reset_required)
i40evf_schedule_reset(adapter);
return 0;
}
static const struct ethtool_ops i40evf_ethtool_ops = { static const struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings, .get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo, .get_drvinfo = i40evf_get_drvinfo,
...@@ -572,8 +509,6 @@ static const struct ethtool_ops i40evf_ethtool_ops = { ...@@ -572,8 +509,6 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings, .get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats, .get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count, .get_sset_count = i40evf_get_sset_count,
.get_priv_flags = i40evf_get_priv_flags,
.set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel, .get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel, .set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce, .get_coalesce = i40evf_get_coalesce,
......
...@@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) ...@@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{ {
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i; int i;
int rx_buf_len;
/* Set the RX buffer length according to the mode */
if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
netdev->mtu <= ETH_DATA_LEN)
rx_buf_len = I40EVF_RXBUFFER_2048;
else
rx_buf_len = ALIGN(max_frame, 1024);
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
adapter->rx_rings[i].rx_buf_len = rx_buf_len; adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
set_ring_ps_enabled(&adapter->rx_rings[i]);
adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
} else {
clear_ring_ps_enabled(&adapter->rx_rings[i]);
}
} }
} }
...@@ -1007,14 +990,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) ...@@ -1007,14 +990,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i]; struct i40e_ring *ring = &adapter->rx_rings[i];
if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
i40evf_alloc_rx_headers(ring);
i40evf_alloc_rx_buffers_ps(ring, ring->count);
} else {
i40evf_alloc_rx_buffers_1buf(ring, ring->count);
}
ring->next_to_use = ring->count - 1;
writel(ring->next_to_use, ring->tail);
} }
} }
...@@ -2423,11 +2399,6 @@ static void i40evf_init_task(struct work_struct *work) ...@@ -2423,11 +2399,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
/* Default to single buffer rx, can be changed through ethtool. */
adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops; netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev); i40evf_set_ethtool_ops(netdev);
...@@ -2795,7 +2766,6 @@ static void i40evf_remove(struct pci_dev *pdev) ...@@ -2795,7 +2766,6 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr); iounmap(hw->hw_addr);
pci_release_regions(pdev); pci_release_regions(pdev);
i40evf_free_all_tx_resources(adapter); i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter); i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter); i40evf_free_queues(adapter);
......
...@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) ...@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = adapter->netdev->mtu vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
vqpi->rxq.splithdr_enabled = true;
vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
}
vqpi++; vqpi++;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment