Commit 397df709 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-03-29

This series contains updates to i40e and i40evf only.

Preethi changes the default driver mode of operation to descriptor
write-back for VF.
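The core of that change on the Tx side is that completion is now read from
the descriptor itself rather than from a separate head write-back word (see
the i40e_clean_tx_irq hunks further down).  A minimal standalone C model of
that check, using simplified stand-in types rather than the driver's real
structures:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the driver's "descriptor done" dtype value (illustrative). */
    #define TX_DESC_DTYPE_DESC_DONE 0xFULL

    struct tx_desc {
            uint64_t cmd_type_offset_bsz;   /* written back by hardware */
    };

    /* Descriptor write-back mode: the clean routine stops as soon as the
     * EOP descriptor has not been marked done by the hardware.
     */
    static bool tx_desc_done(const struct tx_desc *eop_desc)
    {
            return (eop_desc->cmd_type_offset_bsz & TX_DESC_DTYPE_DESC_DONE) != 0;
    }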

Alex cleans up and addresses several issues in the way that i40e handles
private flags.  Modifies the driver to use the length of the packet
instead of the DD status bit to determine if a new descriptor is ready
to be processed.  Refactors the driver by pulling the code responsible
for fetching the receive buffer and synchronizing DMA into a single
function.  Also pulls out the code responsible for handling buffer
recycling and page counting and distributes it through several functions,
so we can commonize the bits that handle either freeing or recycling the
buffers.  Cleans up the code in preparation for adding support for
build_skb().  Changes the way we handle the maximum frame size for the
receive path so it is more consistent with other drivers.
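As a sketch of the "length instead of DD bit" idea: the Rx clean loop now
pulls the packet length out of the descriptor's written-back
status/error/length qword and treats a zero length as "descriptor not ready
yet", mirroring the i40e_clean_rx_irq hunks further down.  A self-contained
helper with representative shift/mask values (the real ones come from the
driver headers):

    #include <stdint.h>

    #define RXD_QW1_LENGTH_PBUF_SHIFT 38
    #define RXD_QW1_LENGTH_PBUF_MASK  (0x3FFFULL << RXD_QW1_LENGTH_PBUF_SHIFT)

    /* Returns the packet buffer length from qword1 of a written-back Rx
     * descriptor; zero means the hardware has not written the descriptor
     * back yet, so the caller breaks out of the clean loop.
     */
    static inline unsigned int rx_desc_packet_len(uint64_t qword1)
    {
            return (qword1 & RXD_QW1_LENGTH_PBUF_MASK) >>
                   RXD_QW1_LENGTH_PBUF_SHIFT;
    }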

Paul enables X722 to use the direct read/write method since it does not
support the AQ command to read/write the control register.
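The change itself is just a wider predicate for falling back to direct
register access.  A standalone sketch of the condition used in the
use_register hunks below, with a stand-in MAC-type enum (the real driver
uses hw->mac.type and I40E_MAC_X722):

    #include <stdbool.h>

    enum mac_type { MAC_XL710, MAC_X722 };  /* stand-in, illustrative only */

    /* Use direct register access when the firmware API predates the AQ
     * control-register read/write commands, or on X722, which does not
     * implement them.
     */
    static bool use_direct_register(unsigned int api_maj, unsigned int api_min,
                                    enum mac_type type)
    {
            return (api_maj == 1 && api_min < 5) || type == MAC_X722;
    }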

Christopher fixes a case where we could miss an ARQ element if a new one
is added before we re-enable interrupts and exit the loop.
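A condensed sketch of the fixed control flow (the helper names here are
hypothetical stand-ins; the real code is in the i40e_clean_adminq_subtask
hunk below): keep draining until the work limit, and only clear the
"event pending" bit when the loop stopped because the queue was empty.

    #include <stdbool.h>

    bool arq_receive_one(void);       /* hypothetical: false when ARQ is empty */
    void clear_event_pending(void);   /* hypothetical: clears the pending bit  */

    static void clean_adminq(unsigned int work_limit)
    {
            unsigned int i = 0;

            do {
                    if (!arq_receive_one())
                            break;
            } while (i++ < work_limit);

            /* If we bailed out at the work limit, leave the pending bit set
             * so a newly queued element is picked up on the next pass.
             */
            if (i < work_limit)
                    clear_event_pending();
    }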

Jake cleans up a pointless goto statement.  Also cleans up a flag that
was not being used.

Carolyn does round 2 of adding a delay on receive queue disable to
accommodate the hardware's needs.
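The accommodation is an unconditional 50 ms wait after an Rx queue disable,
because the hardware can report the queue as stopped before it really has
(see the i40e_vsi_control_rx hunk below).  A condensed sketch, with a
hypothetical helper standing in for the real queue-state wait:

    #include <linux/delay.h>    /* mdelay() */
    #include <linux/types.h>

    int wait_rx_queue_state(bool enable);   /* hypothetical stand-in */

    static int control_rx_queue(bool enable)
    {
            int ret = wait_rx_queue_state(enable);

            /* HW errata: on Rx disable only, the register can indicate done
             * before it really is; 50 ms is needed to be sure.
             */
            if (!enable)
                    mdelay(50);

            return ret;
    }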
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 14a98e65 d08a9f6c
@@ -91,14 +91,6 @@
 #define I40E_QUEUE_WAIT_RETRY_LIMIT	10
 #define I40E_INT_NAME_STR_LEN		(IFNAMSIZ + 16)
-/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_MFP_FLAG		BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG		BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR			BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS		BIT(3)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT		BIT(4)
-#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT	BIT(5)
 #define I40E_NVM_VERSION_LO_SHIFT	0
 #define I40E_NVM_VERSION_LO_MASK	(0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_NVM_VERSION_HI_SHIFT	12
@@ -397,7 +389,6 @@ struct i40e_pf {
 #define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
 #define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
 #define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
 #define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
 #define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
 #define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
@@ -439,6 +430,7 @@ struct i40e_pf {
 #define I40E_FLAG_TEMP_LINK_POLLING		BIT_ULL(55)
 #define I40E_FLAG_CLIENT_L2_CHANGE		BIT_ULL(56)
 #define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE		BIT_ULL(57)
+#define I40E_FLAG_LEGACY_RX			BIT_ULL(58)

	/* Tracks features that are disabled due to hw limitations.
	 * If a bit is set here, it means that the corresponding
......
@@ -4963,7 +4963,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
	int retry = 5;
	u32 val = 0;

-	use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+	use_register = (((hw->aq.api_maj_ver == 1) &&
+			 (hw->aq.api_min_ver < 5)) ||
+			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -5022,7 +5024,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
	bool use_register;
	int retry = 5;

-	use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+	use_register = (((hw->aq.api_maj_ver == 1) &&
+			 (hw->aq.api_min_ver < 5)) ||
+			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
......
@@ -207,22 +207,37 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
 #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)

-static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
-	"MFP",
-	"LinkPolling",
-	"flow-director-atr",
-	"veb-stats",
-	"hw-atr-eviction",
+struct i40e_priv_flags {
+	char flag_string[ETH_GSTRING_LEN];
+	u64 flag;
+	bool read_only;
 };
-#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
+
+#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
+	.flag_string = _name, \
+	.flag = _flag, \
+	.read_only = _read_only, \
+}
+
+static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
+	/* NOTE: MFP setting cannot be changed */
+	I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+	I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
+	I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
+	I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
+	I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0),
+	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)

 /* Private flags with a global effect, restricted to PF 0 */
-static const char i40e_gl_priv_flags_strings[][ETH_GSTRING_LEN] = {
-	"vf-true-promisc-support",
+static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
+	I40E_PRIV_FLAG("vf-true-promisc-support",
+		       I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
 };
-#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_priv_flags_strings)
+#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)

 /**
  * i40e_partition_setting_complaint - generic complaint for MFP restriction
...@@ -1660,12 +1675,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, ...@@ -1660,12 +1675,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break; break;
case ETH_SS_PRIV_FLAGS: case ETH_SS_PRIV_FLAGS:
memcpy(data, i40e_priv_flags_strings, for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); snprintf(p, ETH_GSTRING_LEN, "%s",
data += I40E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN; i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id == 0) p += ETH_GSTRING_LEN;
memcpy(data, i40e_gl_priv_flags_strings, }
I40E_GL_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); if (pf->hw.pf_id != 0)
break;
for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
i40e_gl_gstrings_priv_flags[i].flag_string);
p += ETH_GSTRING_LEN;
}
break; break;
default: default:
break; break;
...@@ -3952,7 +3973,7 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, ...@@ -3952,7 +3973,7 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
* @dev: network interface device structure * @dev: network interface device structure
* *
* The get string set count and the string set should be matched for each * The get string set count and the string set should be matched for each
* flag returned. Add new strings for each flag to the i40e_priv_flags_strings * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags
* array. * array.
* *
* Returns a u32 bitmap of flags. * Returns a u32 bitmap of flags.
...@@ -3962,19 +3983,27 @@ static u32 i40e_get_priv_flags(struct net_device *dev) ...@@ -3962,19 +3983,27 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
u32 ret_flags = 0; u32 i, j, ret_flags = 0;
ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; const struct i40e_priv_flags *priv_flags;
ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
I40E_PRIV_FLAGS_FD_ATR : 0; priv_flags = &i40e_gstrings_priv_flags[i];
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0; if (priv_flags->flag & pf->flags)
ret_flags |= pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? ret_flags |= BIT(i);
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; }
if (pf->hw.pf_id == 0) {
ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ? if (pf->hw.pf_id != 0)
I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0; return ret_flags;
for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
const struct i40e_priv_flags *priv_flags;
priv_flags = &i40e_gl_gstrings_priv_flags[j];
if (priv_flags->flag & pf->flags)
ret_flags |= BIT(i + j);
} }
return ret_flags; return ret_flags;
...@@ -3990,54 +4019,66 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) ...@@ -3990,54 +4019,66 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
u16 sw_flags = 0, valid_flags = 0; u64 changed_flags;
bool reset_required = false; u32 i, j;
bool promisc_change = false;
int ret;
/* NOTE: MFP is not settable */ changed_flags = pf->flags;
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; const struct i40e_priv_flags *priv_flags;
else
pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
/* allow the user to control the state of the Flow priv_flags = &i40e_gstrings_priv_flags[i];
* Director ATR (Application Targeted Routing) feature
* of the driver
*/
if (flags & I40E_PRIV_FLAGS_FD_ATR) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush current ATR settings */ if (priv_flags->read_only)
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); continue;
if (flags & BIT(i))
pf->flags |= priv_flags->flag;
else
pf->flags &= ~(priv_flags->flag);
} }
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) && if (pf->hw.pf_id != 0)
!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { goto flags_complete;
pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
reset_required = true; for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) {
} else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) && const struct i40e_priv_flags *priv_flags;
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; priv_flags = &i40e_gl_gstrings_priv_flags[j];
reset_required = true;
if (priv_flags->read_only)
continue;
if (flags & BIT(i + j))
pf->flags |= priv_flags->flag;
else
pf->flags &= ~(priv_flags->flag);
} }
if (pf->hw.pf_id == 0) { flags_complete:
if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && /* check for flags that changed */
!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { changed_flags ^= pf->flags;
pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
promisc_change = true; /* Process any additional changes needed as a result of flag changes.
} else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && * The changed_flags value reflects the list of bits that were
(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { * changed in the code above.
pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT; */
promisc_change = true;
} /* Flush current ATR settings if ATR was disabled */
if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
} }
if (promisc_change) {
/* Only allow ATR evict on hardware that is capable of handling it */
if (pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) {
u16 sw_flags = 0, valid_flags = 0;
int ret;
if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
...@@ -4053,14 +4094,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) ...@@ -4053,14 +4094,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} }
} }
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && /* Issue reset to cause things to take effect, as additional bits
(pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) * are added we will need to create a mask of bits requiring reset
pf->hw_disabled_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; */
else if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
pf->hw_disabled_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
/* if needed, issue reset to cause things to take effect */
if (reset_required)
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
return 0; return 0;
......
@@ -2995,7 +2995,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
	ring->rx_buf_len = vsi->rx_buf_len;

-	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;
@@ -3075,17 +3076,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
	int err = 0;
	u16 i;

-	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
-		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
-			       + ETH_FCS_LEN + VLAN_HLEN;
-	else
-		vsi->max_frame = I40E_RXBUFFER_2048;
-
-	vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-	/* round up for the chip's needs */
-	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+		vsi->max_frame = I40E_MAX_RXBUFFER;
+		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+	} else if (vsi->netdev->mtu <= ETH_DATA_LEN) {
+		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+	} else {
+		vsi->max_frame = I40E_MAX_RXBUFFER;
+		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -4065,6 +4067,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
		}
	}

+	/* Due to HW errata, on Rx disable only, the register can indicate done
+	 * before it really is. Needs 50ms to be sure
+	 */
+	if (!enable)
+		mdelay(50);
+
	return ret;
 }
@@ -5167,10 +5175,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
	    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
		dev_info(&pf->pdev->dev,
			 "DCBX offload is not supported or is disabled for this PF.\n");
-
-		if (pf->flags & I40E_FLAG_MFP_ENABLED)
-			goto out;
-
	} else {
		/* When status is not DISABLED then DCBX in FW */
		pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -6519,9 +6523,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
				 opcode);
			break;
		}
-	} while (pending && (i++ < pf->adminq_work_limit));
+	} while (i++ < pf->adminq_work_limit);

-	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+	if (i < pf->adminq_work_limit)
+		clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
......
...@@ -1294,6 +1294,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -1294,6 +1294,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = 0; bi->page_offset = 0;
/* initialize pagecnt_bias to 1 representing we fully own page */
bi->pagecnt_bias = 1; bi->pagecnt_bias = 1;
return true; return true;
...@@ -1622,8 +1624,6 @@ static inline bool i40e_page_is_reusable(struct page *page) ...@@ -1622,8 +1624,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive * the adapter for another receive
* *
* @rx_buffer: buffer containing the page * @rx_buffer: buffer containing the page
* @page: page address from rx_buffer
* @truesize: actual size of the buffer in this page
* *
* If page is reusable, rx_buffer->page_offset is adjusted to point to * If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page. * an unused region in the page.
...@@ -1646,14 +1646,13 @@ static inline bool i40e_page_is_reusable(struct page *page) ...@@ -1646,14 +1646,13 @@ static inline bool i40e_page_is_reusable(struct page *page)
* *
* In either case, if the page is reusable its refcount is increased. * In either case, if the page is reusable its refcount is increased.
**/ **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
struct page *page,
const unsigned int truesize)
{ {
#if (PAGE_SIZE >= 8192) #if (PAGE_SIZE >= 8192)
unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif #endif
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */ /* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page))) if (unlikely(!i40e_page_is_reusable(page)))
...@@ -1661,15 +1660,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1661,15 +1660,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */ /* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != pagecnt_bias)) if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false; return false;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
#else #else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > last_offset) if (rx_buffer->page_offset > last_offset)
return false; return false;
#endif #endif
...@@ -1678,10 +1671,11 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1678,10 +1671,11 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds. * number of references the driver holds.
*/ */
if (unlikely(pagecnt_bias == 1)) { if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX); page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX; rx_buffer->pagecnt_bias = USHRT_MAX;
} }
return true; return true;
} }
...@@ -1689,131 +1683,142 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1689,131 +1683,142 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add * @rx_buffer: buffer containing page to add
* @size: packet length from rx_desc
* @skb: sk_buff to place the data into * @skb: sk_buff to place the data into
* @size: packet length from rx_desc
* *
* This function will add the data contained in rx_buffer->page to the skb. * This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is * It will just attach the page as a frag to the skb.
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
* *
* The function will then update the page offset if necessary and return * The function will then update the page offset.
* true if the buffer can be reused by the adapter.
**/ **/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer, struct i40e_rx_buffer *rx_buffer,
unsigned int size, struct sk_buff *skb,
struct sk_buff *skb) unsigned int size)
{ {
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048; unsigned int truesize = I40E_RXBUFFER_2048;
#else #else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); unsigned int truesize = SKB_DATA_ALIGN(size);
#endif #endif
unsigned int pull_len;
if (unlikely(skb_is_nonlinear(skb))) skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
goto add_tail_frag; rx_buffer->page_offset, size, truesize);
/* will the data fit in the skb we allocated? if so, just /* page is being used so we must update the page offset */
* copy it as it is pretty small anyway #if (PAGE_SIZE < 8192)
*/ rx_buffer->page_offset ^= truesize;
if (size <= I40E_RX_HDR_SIZE) { #else
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); rx_buffer->page_offset += truesize;
#endif
/* page is reusable, we can reuse buffer as-is */ }
if (likely(i40e_page_is_reusable(page)))
return true;
/* this page cannot be reused so discard it */
return false;
}
/* we need the header to contain the greater of either /**
* ETH_HLEN or 60 bytes if the skb->len is less than * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* 60 for skb_pad. * @rx_ring: rx descriptor ring to transact packets on
*/ * @size: size of buffer to add to skb
pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); *
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
const unsigned int size)
{
struct i40e_rx_buffer *rx_buffer;
/* align pull length to size of long to optimize rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
* memcpy performance prefetchw(rx_buffer->page);
*/
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */ /* we are reusing so sync this buffer for CPU use */
va += pull_len; dma_sync_single_range_for_cpu(rx_ring->dev,
size -= pull_len; rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
add_tail_frag: /* We have pulled a buffer for use, so decrement pagecnt_bias */
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rx_buffer->pagecnt_bias--;
(unsigned long)va & ~PAGE_MASK, size, truesize);
return i40e_can_reuse_rx_page(rx_buffer, page, truesize); return rx_buffer;
} }
/** /**
* i40e_fetch_rx_buffer - Allocate skb and populate it * i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware * @rx_buffer: rx buffer to pull data from
* @size: size of buffer to add to skb
* *
* This function allocates an skb on the fly, and populates it with the page * This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the skb * data from the current receive descriptor, taking care to set up the
* correctly, as well as handling calling the page recycle function if * skb correctly.
* necessary.
*/ */
static inline static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer,
union i40e_rx_desc *rx_desc, unsigned int size)
struct sk_buff *skb)
{ {
u64 local_status_error_len = void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
le64_to_cpu(rx_desc->wb.qword1.status_error_len); #if (PAGE_SIZE < 8192)
unsigned int size = unsigned int truesize = I40E_RXBUFFER_2048;
(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> #else
I40E_RXD_QW1_LENGTH_PBUF_SHIFT; unsigned int truesize = SKB_DATA_ALIGN(size);
struct i40e_rx_buffer *rx_buffer; #endif
struct page *page; unsigned int headlen;
struct sk_buff *skb;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
if (likely(!skb)) {
void *page_addr = page_address(page) + rx_buffer->page_offset;
/* prefetch first cache line of first page */ /* prefetch first cache line of first page */
prefetch(page_addr); prefetch(va);
#if L1_CACHE_BYTES < 128 #if L1_CACHE_BYTES < 128
prefetch(page_addr + L1_CACHE_BYTES); prefetch(va + L1_CACHE_BYTES);
#endif #endif
/* allocate a skb to store the frags */ /* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
I40E_RX_HDR_SIZE, I40E_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN); GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb)) { if (unlikely(!skb))
rx_ring->rx_stats.alloc_buff_failed++; return NULL;
return NULL;
}
/* we will be copying header into skb->data in /* Determine available headroom for copy */
* pskb_may_pull so it is in our interest to prefetch headlen = size;
* it now to avoid a possible cache miss if (headlen > I40E_RX_HDR_SIZE)
*/ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
prefetchw(skb->data);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
rx_buffer->page_offset + headlen,
size, truesize);
/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
} else {
/* buffer is unused, reset bias back to rx_buffer */
rx_buffer->pagecnt_bias++;
} }
/* we are reusing so sync this buffer for CPU use */ return skb;
dma_sync_single_range_for_cpu(rx_ring->dev, }
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
/* pull page into skb */ /**
if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the bufer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer)
{
if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */ /* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer); i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++; rx_ring->rx_stats.page_reuse_count++;
...@@ -1827,8 +1832,6 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1827,8 +1832,6 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
/* clear contents of buffer_info */ /* clear contents of buffer_info */
rx_buffer->page = NULL; rx_buffer->page = NULL;
return skb;
} }
/** /**
...@@ -1889,7 +1892,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1889,7 +1892,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false; bool failure = false;
while (likely(total_rx_packets < budget)) { while (likely(total_rx_packets < budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag; u16 vlan_tag;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
...@@ -1906,22 +1911,36 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1906,22 +1911,36 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors /* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr * because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the * which is always zero because packet split isn't used, if the
* hardware wrote DD then it will be non-zero * hardware wrote DD then the length will be non-zero
*/ */
if (!i40e_test_staterr(rx_desc, qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the * any other fields out of the rx_desc until we have
* DD bit is set. * verified the descriptor has been written back.
*/ */
dma_rmb(); dma_rmb();
skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb); rx_buffer = i40e_get_rx_buffer(rx_ring, size);
if (!skb)
/* retrieve a buffer from the ring */
if (skb)
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
else
skb = i40e_construct_skb(rx_ring, rx_buffer, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
rx_buffer->pagecnt_bias++;
break; break;
}
i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++; cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb)) if (i40e_is_non_eop(rx_ring, rx_desc, skb))
......
@@ -117,10 +117,8 @@ enum i40e_dyn_idx_t {
 /* Supported Rx Buffer Sizes (a multiple of 128) */
 #define I40E_RXBUFFER_256	256
+#define I40E_RXBUFFER_1536	1536  /* 128B aligned standard Ethernet frame */
 #define I40E_RXBUFFER_2048	2048
-#define I40E_RXBUFFER_3072	3072  /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096	4096
-#define I40E_RXBUFFER_8192	8192
 #define I40E_MAX_RXBUFFER	9728  /* largest size for single descriptor */

 /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
......
@@ -958,7 +958,9 @@ u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
	int retry = 5;
	u32 val = 0;

-	use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+	use_register = (((hw->aq.api_maj_ver == 1) &&
+			 (hw->aq.api_min_ver < 5)) ||
+			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
@@ -1019,7 +1021,9 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
	bool use_register;
	int retry = 5;

-	use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+	use_register = (((hw->aq.api_maj_ver == 1) &&
+			 (hw->aq.api_min_ver < 5)) ||
+			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
......
@@ -137,10 +137,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
	u32 head, tail;

-	if (!in_sw)
-		head = i40e_get_head(ring);
-	else
-		head = ring->next_to_clean;
+	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
@@ -165,7 +162,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 {
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
-	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;
@@ -174,8 +170,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

-	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
-
	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -186,8 +180,9 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

-		/* we have caught up to head, no work left to do */
-		if (tx_head == tx_desc)
+		/* if the descriptor isn't done, no work yet to do */
+		if (!(eop_desc->cmd_type_offset_bsz &
+		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
@@ -464,10 +459,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
-	/* add u32 for head writeback, align after this takes care of
-	 * guaranteeing this is at least one cache line in size
-	 */
-	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
...@@ -671,6 +662,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -671,6 +662,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = 0; bi->page_offset = 0;
/* initialize pagecnt_bias to 1 representing we fully own page */
bi->pagecnt_bias = 1; bi->pagecnt_bias = 1;
return true; return true;
...@@ -989,8 +982,6 @@ static inline bool i40e_page_is_reusable(struct page *page) ...@@ -989,8 +982,6 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive * the adapter for another receive
* *
* @rx_buffer: buffer containing the page * @rx_buffer: buffer containing the page
* @page: page address from rx_buffer
* @truesize: actual size of the buffer in this page
* *
* If page is reusable, rx_buffer->page_offset is adjusted to point to * If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page. * an unused region in the page.
...@@ -1013,14 +1004,13 @@ static inline bool i40e_page_is_reusable(struct page *page) ...@@ -1013,14 +1004,13 @@ static inline bool i40e_page_is_reusable(struct page *page)
* *
* In either case, if the page is reusable its refcount is increased. * In either case, if the page is reusable its refcount is increased.
**/ **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
struct page *page,
const unsigned int truesize)
{ {
#if (PAGE_SIZE >= 8192) #if (PAGE_SIZE >= 8192)
unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif #endif
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--; unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */ /* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page))) if (unlikely(!i40e_page_is_reusable(page)))
...@@ -1028,15 +1018,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1028,15 +1018,9 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */ /* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != pagecnt_bias)) if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false; return false;
/* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
#else #else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > last_offset) if (rx_buffer->page_offset > last_offset)
return false; return false;
#endif #endif
...@@ -1045,7 +1029,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1045,7 +1029,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds. * number of references the driver holds.
*/ */
if (unlikely(pagecnt_bias == 1)) { if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX); page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX; rx_buffer->pagecnt_bias = USHRT_MAX;
} }
...@@ -1057,131 +1041,142 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1057,131 +1041,142 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add * @rx_buffer: buffer containing page to add
* @size: packet length from rx_desc
* @skb: sk_buff to place the data into * @skb: sk_buff to place the data into
* @size: packet length from rx_desc
* *
* This function will add the data contained in rx_buffer->page to the skb. * This function will add the data contained in rx_buffer->page to the skb.
* This is done either through a direct copy if the data in the buffer is * It will just attach the page as a frag to the skb.
* less than the skb header size, otherwise it will just attach the page as
* a frag to the skb.
* *
* The function will then update the page offset if necessary and return * The function will then update the page offset.
* true if the buffer can be reused by the adapter.
**/ **/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer, struct i40e_rx_buffer *rx_buffer,
unsigned int size, struct sk_buff *skb,
struct sk_buff *skb) unsigned int size)
{ {
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = I40E_RXBUFFER_2048; unsigned int truesize = I40E_RXBUFFER_2048;
#else #else
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); unsigned int truesize = SKB_DATA_ALIGN(size);
#endif #endif
unsigned int pull_len;
if (unlikely(skb_is_nonlinear(skb)))
goto add_tail_frag;
/* will the data fit in the skb we allocated? if so, just
* copy it as it is pretty small anyway
*/
if (size <= I40E_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is reusable, we can reuse buffer as-is */ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
if (likely(i40e_page_is_reusable(page))) rx_buffer->page_offset, size, truesize);
return true;
/* this page cannot be reused so discard it */ /* page is being used so we must update the page offset */
return false; #if (PAGE_SIZE < 8192)
} rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
}
/* we need the header to contain the greater of either /**
* ETH_HLEN or 60 bytes if the skb->len is less than * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* 60 for skb_pad. * @rx_ring: rx descriptor ring to transact packets on
*/ * @size: size of buffer to add to skb
pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); *
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
const unsigned int size)
{
struct i40e_rx_buffer *rx_buffer;
/* align pull length to size of long to optimize rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
* memcpy performance prefetchw(rx_buffer->page);
*/
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
/* update all of the pointers */ /* we are reusing so sync this buffer for CPU use */
va += pull_len; dma_sync_single_range_for_cpu(rx_ring->dev,
size -= pull_len; rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
add_tail_frag: /* We have pulled a buffer for use, so decrement pagecnt_bias */
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, rx_buffer->pagecnt_bias--;
(unsigned long)va & ~PAGE_MASK, size, truesize);
return i40e_can_reuse_rx_page(rx_buffer, page, truesize); return rx_buffer;
} }
/** /**
* i40evf_fetch_rx_buffer - Allocate skb and populate it * i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware * @rx_buffer: rx buffer to pull data from
* @size: size of buffer to add to skb
* *
* This function allocates an skb on the fly, and populates it with the page * This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the skb * data from the current receive descriptor, taking care to set up the
* correctly, as well as handling calling the page recycle function if * skb correctly.
* necessary.
*/ */
static inline static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer,
union i40e_rx_desc *rx_desc, unsigned int size)
struct sk_buff *skb)
{ {
u64 local_status_error_len = void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
le64_to_cpu(rx_desc->wb.qword1.status_error_len); #if (PAGE_SIZE < 8192)
unsigned int size = unsigned int truesize = I40E_RXBUFFER_2048;
(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> #else
I40E_RXD_QW1_LENGTH_PBUF_SHIFT; unsigned int truesize = SKB_DATA_ALIGN(size);
struct i40e_rx_buffer *rx_buffer; #endif
struct page *page; unsigned int headlen;
struct sk_buff *skb;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);
if (likely(!skb)) {
void *page_addr = page_address(page) + rx_buffer->page_offset;
/* prefetch first cache line of first page */ /* prefetch first cache line of first page */
prefetch(page_addr); prefetch(va);
#if L1_CACHE_BYTES < 128 #if L1_CACHE_BYTES < 128
prefetch(page_addr + L1_CACHE_BYTES); prefetch(va + L1_CACHE_BYTES);
#endif #endif
/* allocate a skb to store the frags */ /* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
I40E_RX_HDR_SIZE, I40E_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN); GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb)) { if (unlikely(!skb))
rx_ring->rx_stats.alloc_buff_failed++; return NULL;
return NULL;
}
/* we will be copying header into skb->data in /* Determine available headroom for copy */
* pskb_may_pull so it is in our interest to prefetch headlen = size;
* it now to avoid a possible cache miss if (headlen > I40E_RX_HDR_SIZE)
*/ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
prefetchw(skb->data);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
rx_buffer->page_offset + headlen,
size, truesize);
/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
} else {
/* buffer is unused, reset bias back to rx_buffer */
rx_buffer->pagecnt_bias++;
} }
/* we are reusing so sync this buffer for CPU use */ return skb;
dma_sync_single_range_for_cpu(rx_ring->dev, }
rx_buffer->dma,
rx_buffer->page_offset,
size,
DMA_FROM_DEVICE);
/* pull page into skb */ /**
if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) { * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the bufer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer)
{
if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */ /* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer); i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++; rx_ring->rx_stats.page_reuse_count++;
...@@ -1195,8 +1190,6 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1195,8 +1190,6 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
/* clear contents of buffer_info */ /* clear contents of buffer_info */
rx_buffer->page = NULL; rx_buffer->page = NULL;
return skb;
} }
/** /**
...@@ -1252,7 +1245,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1252,7 +1245,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
bool failure = false; bool failure = false;
while (likely(total_rx_packets < budget)) { while (likely(total_rx_packets < budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag; u16 vlan_tag;
u8 rx_ptype; u8 rx_ptype;
u64 qword; u64 qword;
...@@ -1269,22 +1264,36 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1269,22 +1264,36 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors /* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr * because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the * which is always zero because packet split isn't used, if the
* hardware wrote DD then it will be non-zero * hardware wrote DD then the length will be non-zero
*/ */
if (!i40e_test_staterr(rx_desc, qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
break; break;
/* This memory barrier is needed to keep us from reading /* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the * any other fields out of the rx_desc until we have
* DD bit is set. * verified the descriptor has been written back.
*/ */
dma_rmb(); dma_rmb();
skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb); rx_buffer = i40e_get_rx_buffer(rx_ring, size);
if (!skb)
/* retrieve a buffer from the ring */
if (skb)
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
else
skb = i40e_construct_skb(rx_ring, rx_buffer, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
rx_buffer->pagecnt_bias++;
break; break;
}
i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++; cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb)) if (i40e_is_non_eop(rx_ring, rx_desc, skb))
...@@ -2012,7 +2021,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2012,7 +2021,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
...@@ -2048,7 +2056,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2048,7 +2056,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++; tx_desc++;
i++; i++;
desc_count++;
if (i == tx_ring->count) { if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0); tx_desc = I40E_TX_DESC(tx_ring, 0);
...@@ -2070,7 +2077,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2070,7 +2077,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++; tx_desc++;
i++; i++;
desc_count++;
if (i == tx_ring->count) { if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0); tx_desc = I40E_TX_DESC(tx_ring, 0);
...@@ -2096,46 +2102,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2096,46 +2102,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with EOP bit */ /* write last descriptor with RS and EOP bits */
td_cmd |= I40E_TX_DESC_CMD_EOP; td_cmd |= I40E_TXD_CMD;
/* We can OR these values together as they both are checked against
* 4 below and at this point desc_count will be used as a boolean value
* after this if/else block.
*/
desc_count |= ++tx_ring->packet_stride;
/* Algorithm to optimize tail and RS bit setting:
* if queue is stopped
* mark RS bit
* reset packet counter
* else if xmit_more is supported and is true
* advance packet counter to 4
* reset desc_count to 0
*
* if desc_count >= 4
* mark RS bit
* reset packet counter
* if desc_count > 0
* update tail
*
* Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
if (netif_xmit_stopped(txring_txq(tx_ring))) {
goto do_rs;
} else if (skb->xmit_more) {
/* set stride to arm on next packet and reset desc_count */
tx_ring->packet_stride = WB_STRIDE;
desc_count = 0;
} else if (desc_count >= WB_STRIDE) {
do_rs:
/* write last descriptor with RS bit set */
td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
}
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag); build_ctob(td_cmd, td_offset, size, td_tag);
...@@ -2151,7 +2119,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2151,7 +2119,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc; first->next_to_watch = tx_desc;
/* notify HW of packet */ /* notify HW of packet */
if (desc_count) { if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail); writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail /* we need this if more than one processor can write to our tail
......
@@ -104,10 +104,8 @@ enum i40e_dyn_idx_t {
 /* Supported Rx Buffer Sizes (a multiple of 128) */
 #define I40E_RXBUFFER_256	256
+#define I40E_RXBUFFER_1536	1536  /* 128B aligned standard Ethernet frame */
 #define I40E_RXBUFFER_2048	2048
-#define I40E_RXBUFFER_3072	3072  /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096	4096
-#define I40E_RXBUFFER_8192	8192
 #define I40E_MAX_RXBUFFER	9728  /* largest size for single descriptor */

 /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -392,20 +390,6 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40evf_chk_linearize(struct sk_buff *skb);

-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: Tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 /**
  * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
  * @skb: send buffer
......
@@ -72,10 +72,6 @@ struct i40e_vsi {
 #define I40EVF_MAX_RXD		4096
 #define I40EVF_MIN_RXD		64
 #define I40EVF_REQ_DESCRIPTOR_MULTIPLE	32
-/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048	2048
-#define I40EVF_MAX_RXBUFFER	16384  /* largest size for single descriptor */
-
 #define I40EVF_MAX_AQ_BUF_SIZE	4096
 #define I40EVF_AQ_LEN		32
 #define I40EVF_AQ_MAX_ERR	20 /* times to try before resetting AQ */
@@ -222,6 +218,7 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS	BIT(17)
 #define I40EVF_FLAG_PROMISC_ON			BIT(18)
 #define I40EVF_FLAG_ALLMULTI_ON			BIT(19)
+#define I40EVF_FLAG_LEGACY_RX			BIT(20)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		0
 #define I40E_FLAG_DCB_ENABLED			0
@@ -229,6 +226,7 @@ struct i40evf_adapter {
 #define I40E_FLAG_RX_CSUM_ENABLED		I40EVF_FLAG_RX_CSUM_ENABLED
 #define I40E_FLAG_WB_ON_ITR_CAPABLE		I40EVF_FLAG_WB_ON_ITR_CAPABLE
 #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
+#define I40E_FLAG_LEGACY_RX			I40EVF_FLAG_LEGACY_RX
	/* flags for admin queue service task */
	u32 aq_required;
 #define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
......
...@@ -63,6 +63,29 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { ...@@ -63,6 +63,29 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \ #define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
/* For now we have one and only one private flag and it is only defined
* when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
* of leaving all this code sitting around empty we will strip it unless
* our one private flag is actually available.
*/
struct i40evf_priv_flags {
char flag_string[ETH_GSTRING_LEN];
u32 flag;
bool read_only;
};
#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
.flag_string = _name, \
.flag = _flag, \
.read_only = _read_only, \
}
static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
};
#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
/** /**
* i40evf_get_link_ksettings - Get Link Speed and Duplex settings * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -124,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) ...@@ -124,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{ {
if (sset == ETH_SS_STATS) if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev); return I40EVF_STATS_LEN(netdev);
else if (sset == ETH_SS_PRIV_FLAGS)
return I40EVF_PRIV_FLAGS_STR_LEN;
else else
return -EINVAL; return -EINVAL;
} }
...@@ -189,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -189,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
} else if (sset == ETH_SS_PRIV_FLAGS) {
for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
i40evf_gstrings_priv_flags[i].flag_string);
p += ETH_GSTRING_LEN;
}
}
}
/**
* i40evf_get_priv_flags - report device private flags
* @netdev: network interface device structure
*
* The string set reported for ETH_SS_PRIV_FLAGS and the count returned by
* get_sset_count() must stay in sync with the flags reported here. Add a new
* string to the i40evf_gstrings_priv_flags array for each new flag.
*
* Returns a u32 bitmap of flags.
**/
static u32 i40evf_get_priv_flags(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 i, ret_flags = 0;
for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
const struct i40evf_priv_flags *priv_flags;
priv_flags = &i40evf_gstrings_priv_flags[i];
if (priv_flags->flag & adapter->flags)
ret_flags |= BIT(i);
}
return ret_flags;
}
/**
* i40evf_set_priv_flags - set private flags
* @netdev: network interface device structure
* @flags: bit flags to be set
**/
static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u64 changed_flags;
u32 i;
changed_flags = adapter->flags;
for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
const struct i40evf_priv_flags *priv_flags;
priv_flags = &i40evf_gstrings_priv_flags[i];
if (priv_flags->read_only)
continue;
if (flags & BIT(i))
adapter->flags |= priv_flags->flag;
else
adapter->flags &= ~(priv_flags->flag);
}
/* check for flags that changed */
changed_flags ^= adapter->flags;
/* Process any additional changes needed as a result of flag changes. */
/* issue a reset to force legacy-rx change to take effect */
if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
}
} }
return 0;
} }
/** /**
...@@ -238,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, ...@@ -238,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
} }
/** /**
...@@ -649,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = { ...@@ -649,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings, .get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats, .get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count, .get_sset_count = i40evf_get_sset_count,
.get_priv_flags = i40evf_get_priv_flags,
.set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel, .get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel, .set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce, .get_coalesce = i40evf_get_coalesce,
......
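With get_priv_flags/set_priv_flags hooked into i40evf_ethtool_ops, the new flag is reachable through the standard ethtool interface; assuming an interface named eth0 (placeholder), something like the following reads and toggles it, and a reset is scheduled internally when legacy-rx changes while the interface is running:
    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 legacy-rx on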
...@@ -686,12 +686,26 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) ...@@ -686,12 +686,26 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
**/ **/
static void i40evf_configure_rx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{ {
unsigned int rx_buf_len = I40E_RXBUFFER_2048;
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
int i; int i;
/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
/* We use a 1536 buffer size for configurations with
* standard Ethernet mtu. On x86 this gives us enough room
* for shared info and 192 bytes of padding.
*/
if (netdev->mtu <= ETH_DATA_LEN)
rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
}
#endif
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048; adapter->rx_rings[i].rx_buf_len = rx_buf_len;
} }
} }
......
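The 192-byte figure in the comment above falls out of simple space accounting on a 4K-page x86 system, where each Rx buffer occupies half a page. A rough sketch of that arithmetic, assuming NET_IP_ALIGN == 0 (x86) and whatever skb_shared_info size the running kernel's layout gives (about 320 bytes after SKB_DATA_ALIGN on x86_64):
#include <linux/skbuff.h>
/* Half a 4K page is 2048 bytes; with a 1536-byte data buffer, the space
 * left for struct skb_shared_info plus padding is 512 bytes.  On x86_64,
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is roughly 320 bytes,
 * leaving about the 192 bytes of padding mentioned in the driver comment.
 */
static unsigned int rx_1536_padding(void)
{
	return 2048 - 1536 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}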
...@@ -234,7 +234,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) ...@@ -234,7 +234,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi; struct i40e_virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues; int pairs = adapter->num_active_queues;
int i, len; int i, len, max_frame = I40E_MAX_RXBUFFER;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
...@@ -249,6 +249,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) ...@@ -249,6 +249,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
if (!vqci) if (!vqci)
return; return;
/* Limit maximum frame size when jumbo frames is not enabled */
if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
(adapter->netdev->mtu <= ETH_DATA_LEN))
max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs; vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair; vqpi = vqci->qpair;
...@@ -260,17 +265,14 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) ...@@ -260,17 +265,14 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i; vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i].count; vqpi->txq.ring_len = adapter->tx_rings[i].count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma; vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
(vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i; vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = adapter->netdev->mtu vqpi->rxq.max_pkt_size = max_frame;
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size =
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; ALIGN(adapter->rx_rings[i].rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
vqpi++; vqpi++;
} }
......
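The ALIGN() in the databuffer_size assignment rounds the buffer length up to the granularity the Rx queue context expects before it is sent to the PF over virtchnl. A small illustration, assuming I40E_RXQ_CTX_DBUFF_SHIFT is 7 (128-byte units) as in the i40e register headers:
#include <linux/kernel.h>	/* ALIGN(), BIT_ULL() */
#define DBUFF_SHIFT	7	/* assumed value of I40E_RXQ_CTX_DBUFF_SHIFT */
/* e.g. a 1534-byte buffer (1536 - NET_IP_ALIGN) is reported as 1536,
 * while 2048 is already a multiple of 128 and passes through unchanged.
 */
static u32 rxq_databuffer_size(u32 rx_buf_len)
{
	return ALIGN(rx_buf_len, BIT_ULL(DBUFF_SHIFT));
}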