Commit 8063968a authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-06-08

This series contains updates to i40e and i40evf.

Jesse fixes an issue reported by Eric Dumazet where the driver was not
masking the right bits in the receive descriptor before checking them.
Also fixes TSO accounting since the kernel can now send as much as 32kB
in a single skb->frags[] entry, even on a system with 4kB pages.
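
As a rough illustration (editor's sketch, not driver code), the arithmetic
behind the TSO accounting fix, using the I40E_MAX_DATA_PER_TXD and
TXD_USE_COUNT values visible in the i40e_txrx.h hunk below; a 32kB frag
needs four descriptors once each descriptor is capped at 8kB, so the old
one-descriptor-per-frag count was too low:

/* Standalone sketch: descriptor math behind the TSO accounting fix.
 * Values mirror the i40e_txrx.h definitions in the diff below. */
#include <stdio.h>

#define I40E_MAX_DATA_PER_TXD   8192
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)        DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

int main(void)
{
        unsigned int frag_size = 32 * 1024; /* one 32kB skb->frags[] entry */

        /* old accounting assumed one descriptor per frag (1);
         * fixed accounting: 32768 / 8192 = 4 descriptors */
        printf("descriptors for a %u-byte frag: %u\n",
               frag_size, TXD_USE_COUNT(frag_size));
        return 0;
}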

Anjali cleans up registers which are no longer supported.

Akeem cleans up code comments and removes num_msix_entries from the
interrupt setup routine since it was not being used.  Fixes an issue where
the FD SB/ATR and NTUPLE configuration status was reported erroneously:
instead of printing a bare "FDir", the driver now reports the actual
FD_ATR, FD_SB and NTUPLE state.  Fixes a coding error where, during
registration for NAPI, the driver was requesting a budget of 256; the
maximum recommended value for this is NAPI_POLL_WEIGHT, or 64.
Lastly, removes deprecated device IDs because they will not be shipped.
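
For reference, a minimal sketch of NAPI registration against the kernel
API of this era (~3.15); my_poll is a hypothetical callback standing in
for i40e_napi_poll:

/* Sketch only: register a NAPI context with the recommended weight
 * rather than a hard-coded 256. */
#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... clean up to 'budget' packets, counting them in work_done ... */
        if (work_done < budget)
                napi_complete(napi);
        return work_done;
}

static void my_register_napi(struct net_device *netdev,
                             struct napi_struct *napi)
{
        /* NAPI_POLL_WEIGHT (64) is the documented maximum poll budget */
        netif_napi_add(netdev, napi, my_poll, NAPI_POLL_WEIGHT);
}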

Mitch removes log messages which were redundant and therefore unnecessary.
Also removes a bogus code comment, since VF drivers require MSI-X or they
won't get interrupts at all, and cleans up the formatting of several log
messages.  Mitch also fixes possible NULL pointer dereferences in VSI
handling, since not all VSIs have transmit rings.
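
A sketch of the guard pattern this adds (structs reduced to the fields the
check needs; in the driver the test is "if (!vsi || !vsi->tx_rings[0])"):

/* Sketch only: skip VSIs that were allocated without Tx rings before
 * touching per-queue state. */
struct my_ring { unsigned long tx_stats; };

struct my_vsi {
        struct my_ring *tx_rings[4];    /* entries NULL on a ringless VSI */
        int num_queue_pairs;
};

static void walk_tx_rings(struct my_vsi **vsi_table, int num_vsis)
{
        int v, i;

        for (v = 0; v < num_vsis; v++) {
                struct my_vsi *vsi = vsi_table[v];

                /* skip unallocated VSIs and VSIs without transmit rings */
                if (!vsi || !vsi->tx_rings[0])
                        continue;

                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        /* ... per-ring stats work on vsi->tx_rings[i] ... */
                }
        }
}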

Shannon ensures that the PXE mode bit is cleared on each reset, after the
AdminQ has been rebuilt.

Catherine bumps the driver versions for i40e and i40evf.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fff1f59b eeb6b645
...
@@ -201,7 +201,6 @@ struct i40e_pf {
         unsigned long state;
         unsigned long link_check_timeout;
         struct msix_entry *msix_entries;
-        u16 num_msix_entries;
         bool fc_autoneg_status;
         u16 eeprom_version;
...
...
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
         if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
                 switch (hw->device_id) {
                 case I40E_DEV_ID_SFP_XL710:
-                case I40E_DEV_ID_SFP_X710:
                 case I40E_DEV_ID_QEMU:
                 case I40E_DEV_ID_KX_A:
                 case I40E_DEV_ID_KX_B:
                 case I40E_DEV_ID_KX_C:
-                case I40E_DEV_ID_KX_D:
                 case I40E_DEV_ID_QSFP_A:
                 case I40E_DEV_ID_QSFP_B:
                 case I40E_DEV_ID_QSFP_C:
@@ -2514,7 +2512,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
 {
         u32 fcoe_cntx_size, fcoe_filt_size;
         u32 pe_cntx_size, pe_filt_size;
-        u32 fcoe_fmax, pe_fmax;
+        u32 fcoe_fmax;
         u32 val;
 
         /* Validate FCoE settings passed */
@@ -2589,13 +2587,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
         if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
                 return I40E_ERR_INVALID_SIZE;
 
-        /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
-        val = rd32(hw, I40E_GLHMC_PEXFMAX);
-        pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
-                  >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
-        if (pe_filt_size + pe_cntx_size > pe_fmax)
-                return I40E_ERR_INVALID_SIZE;
-
         return 0;
 }
...
...
@@ -201,7 +201,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 **/
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                               struct i40e_hmc_info *hmc_info,
-                              u32 idx, bool is_pf)
+                              u32 idx)
 {
         i40e_status ret_code = 0;
         struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
         pd_addr = (u64 *)pd_table->pd_page_addr.va;
         pd_addr += rel_pd_idx;
         memset(pd_addr, 0, sizeof(u64));
-        if (is_pf)
-                I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
-        else
-                I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+        I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
         /* free memory here */
         ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
...
...
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
                 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |         \
                  ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
 
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id)        \
-        wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
-             (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |            \
-              ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
 /**
  * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
  * @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                               u32 pd_index);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                               struct i40e_hmc_info *hmc_info,
-                              u32 idx, bool is_pf);
+                              u32 idx);
 i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
                                    u32 idx);
 i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
...
...
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                         /* remove the backing pages from pd_idx1 to i */
                         while (i && (i > pd_idx1)) {
                                 i40e_remove_pd_bp(hw, info->hmc_info,
-                                                  (i - 1), true);
+                                                  (i - 1));
                                 i--;
                         }
                 }
@@ -433,11 +433,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                                        ((j - 1) * I40E_HMC_MAX_BP_COUNT));
                         pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
                         for (i = pd_idx1; i < pd_lmt1; i++) {
-                                i40e_remove_pd_bp(
-                                        hw,
-                                        info->hmc_info,
-                                        i,
-                                        true);
+                                i40e_remove_pd_bp(hw, info->hmc_info, i);
                         }
                         i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
                         break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
                         pd_table =
                                 &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
                         if (pd_table->pd_entry[rel_pd_idx].valid) {
-                                ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
-                                                             j, true);
+                                ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
                                 if (ret_code)
                                         goto exit;
                         }
...
...
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 3
+#define DRV_VERSION_BUILD 5
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
              __stringify(DRV_VERSION_MINOR) "." \
              __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
  */
 static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
-        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
-        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
         }
         rcu_read_unlock();
 
-        /* following stats updated by ixgbe_watchdog_task() */
+        /* following stats updated by i40e_watchdog_subtask() */
         stats->multicast = vsi_stats->multicast;
         stats->tx_errors = vsi_stats->tx_errors;
         stats->tx_dropped = vsi_stats->tx_dropped;
@@ -657,7 +655,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                 struct i40e_vsi *vsi = pf->vsi[v];
 
-                if (!vsi)
+                if (!vsi || !vsi->tx_rings[0])
                         continue;
 
                 for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -711,7 +709,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
                 struct i40e_vsi *vsi = pf->vsi[v];
 
-                if (!vsi)
+                if (!vsi || !vsi->tx_rings[0])
                         continue;
 
                 for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -5520,6 +5518,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                 i40e_verify_eeprom(pf);
         }
 
+        i40e_clear_pxe_mode(hw);
         ret = i40e_get_capabilities(pf);
         if (ret) {
                 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5622,8 +5621,6 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
         /* tell the firmware that we're starting */
         i40e_send_version(pf);
 
-        dev_info(&pf->pdev->dev, "reset complete\n");
-
 end_core_reset:
         clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
 }
@@ -6139,8 +6136,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
                 vectors = 0;
         }
 
-        pf->num_msix_entries = vectors;
-
         return vectors;
 }
@@ -6258,7 +6253,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
         if (vsi->netdev)
                 netif_napi_add(vsi->netdev, &q_vector->napi,
-                               i40e_napi_poll, vsi->work_limit);
+                               i40e_napi_poll, NAPI_POLL_WEIGHT);
 
         q_vector->rx.latency_range = I40E_LOW_LATENCY;
         q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -8239,11 +8234,12 @@ static void i40e_print_features(struct i40e_pf *pf)
         if (pf->flags & I40E_FLAG_RSS_ENABLED)
                 buf += sprintf(buf, "RSS ");
-        buf += sprintf(buf, "FDir ");
         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
-                buf += sprintf(buf, "ATR ");
-        if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+                buf += sprintf(buf, "FD_ATR ");
+        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+                buf += sprintf(buf, "FD_SB ");
                 buf += sprintf(buf, "NTUPLE ");
+        }
         if (pf->flags & I40E_FLAG_DCB_ENABLED)
                 buf += sprintf(buf, "DCB ");
         if (pf->flags & I40E_FLAG_PTP)
...
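For illustration (editor's sketch, not driver code), the shape of the
corrected feature print from the i40e_print_features() hunk above: the
unconditional "FDir" is dropped and FD_SB/NTUPLE are grouped under one
test, so the boot log reflects the flags actually set. Flag values here
are made up for the example.

/* Standalone sketch of the corrected feature-string logic. */
#include <stdio.h>

#define FLAG_FD_ATR (1u << 0)
#define FLAG_FD_SB  (1u << 1)

static void print_features(unsigned int flags, char *buf)
{
        if (flags & FLAG_FD_ATR)
                buf += sprintf(buf, "FD_ATR ");
        if (flags & FLAG_FD_SB) {
                buf += sprintf(buf, "FD_SB ");
                buf += sprintf(buf, "NTUPLE "); /* ntuple rides on sideband */
        }
}

int main(void)
{
        char buf[64] = "";

        print_features(FLAG_FD_SB, buf);
        printf("%s\n", buf);    /* prints "FD_SB NTUPLE " */
        return 0;
}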
...
@@ -2170,9 +2170,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                       struct i40e_ring *tx_ring)
 {
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
         unsigned int f;
-#endif
         int count = 0;
 
         /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2181,12 +2179,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
          * + 1 desc for context descriptor,
          * otherwise try next time
          */
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-        count += skb_shinfo(skb)->nr_frags;
-#endif
 
         count += TXD_USE_COUNT(skb_headlen(skb));
         if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                 tx_ring->tx_stats.tx_busy++;
...
...
@@ -117,11 +117,11 @@ enum i40e_dyn_idx_t {
 #define i40e_rx_desc i40e_32byte_rx_desc
 
 #define I40E_MIN_TX_LEN         17
-#define I40E_MAX_DATA_PER_TXD   16383   /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD   8192
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
 #define I40E_TX_FLAGS_CSUM      (u32)(1)
 #define I40E_TX_FLAGS_HW_VLAN   (u32)(1 << 1)
...
...
@@ -36,12 +36,10 @@
 
 /* Device IDs */
 #define I40E_DEV_ID_SFP_XL710           0x1572
-#define I40E_DEV_ID_SFP_X710            0x1573
 #define I40E_DEV_ID_QEMU                0x1574
 #define I40E_DEV_ID_KX_A                0x157F
 #define I40E_DEV_ID_KX_B                0x1580
 #define I40E_DEV_ID_KX_C                0x1581
-#define I40E_DEV_ID_KX_D                0x1582
 #define I40E_DEV_ID_QSFP_A              0x1583
 #define I40E_DEV_ID_QSFP_B              0x1584
 #define I40E_DEV_ID_QSFP_C              0x1585
@@ -492,9 +490,6 @@ union i40e_32byte_rx_desc {
         } wb;  /* writeback */
 };
 
-#define I40E_RXD_QW1_STATUS_SHIFT       0
-#define I40E_RXD_QW1_STATUS_MASK        (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
 enum i40e_rx_desc_status_bits {
         /* Note: These are predefined bit offsets */
         I40E_RX_DESC_STATUS_DD_SHIFT            = 0,
@@ -511,9 +506,14 @@ enum i40e_rx_desc_status_bits {
         I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
         I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
         I40E_RX_DESC_STATUS_RESERVED_SHIFT      = 16, /* 2 BITS */
-        I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18
+        I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18,
+        I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
+#define I40E_RXD_QW1_STATUS_SHIFT       0
+#define I40E_RXD_QW1_STATUS_MASK        (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+                                         << I40E_RXD_QW1_STATUS_SHIFT)
+
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT  I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
 #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK       (0x3UL << \
                                              I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
...
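As a quick standalone check of the status-mask hunk above (editor's
sketch, not driver code): with UDP_0 at bit 18, the sentinel
I40E_RX_DESC_STATUS_LAST becomes 19, so the derived mask covers all 19
defined status bits where the stale literal only covered 15.

/* Worked example of the new mask definition. */
#include <stdio.h>

enum { I40E_RX_DESC_STATUS_LAST = 19 }; /* next value after UDP_0_SHIFT = 18 */

#define I40E_RXD_QW1_STATUS_SHIFT 0
#define I40E_RXD_QW1_STATUS_MASK  (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
                                   << I40E_RXD_QW1_STATUS_SHIFT)

int main(void)
{
        printf("old mask: 0x7fff\n");
        printf("new mask: 0x%x\n", I40E_RXD_QW1_STATUS_MASK); /* 0x7ffff */
        return 0;
}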
...
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
         int severity;
 };
 
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
 /* VF reset states - these are written into the RSTAT register:
  * I40E_VFGEN_RSTAT1 on the PF
  * I40E_VFGEN_RSTAT on the VF
...
...
@@ -43,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
         if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
                 switch (hw->device_id) {
                 case I40E_DEV_ID_SFP_XL710:
-                case I40E_DEV_ID_SFP_X710:
                 case I40E_DEV_ID_QEMU:
                 case I40E_DEV_ID_KX_A:
                 case I40E_DEV_ID_KX_B:
                 case I40E_DEV_ID_KX_C:
-                case I40E_DEV_ID_KX_D:
                 case I40E_DEV_ID_QSFP_A:
                 case I40E_DEV_ID_QSFP_B:
                 case I40E_DEV_ID_QSFP_C:
...
...
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
                 (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |         \
                  ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
 
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id)        \
-        wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
-             (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |            \
-              ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
 /**
  * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
  * @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                               u32 pd_index);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                               struct i40e_hmc_info *hmc_info,
-                              u32 idx, bool is_pf);
+                              u32 idx);
 i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
                                    u32 idx);
 i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
...
...
@@ -1511,9 +1511,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                       struct i40e_ring *tx_ring)
 {
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
         unsigned int f;
-#endif
         int count = 0;
 
         /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1522,12 +1520,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
          * + 1 desc for context descriptor,
          * otherwise try next time
          */
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-        count += skb_shinfo(skb)->nr_frags;
-#endif
 
         count += TXD_USE_COUNT(skb_headlen(skb));
         if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                 tx_ring->tx_stats.tx_busy++;
...
...
@@ -117,11 +117,11 @@ enum i40e_dyn_idx_t {
 #define i40e_rx_desc i40e_32byte_rx_desc
 
 #define I40E_MIN_TX_LEN         17
-#define I40E_MAX_DATA_PER_TXD   16383   /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD   8192
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
 #define I40E_TX_FLAGS_CSUM      (u32)(1)
 #define I40E_TX_FLAGS_HW_VLAN   (u32)(1 << 1)
...
...
@@ -35,13 +35,11 @@
 #include "i40e_lan_hmc.h"
 
 /* Device IDs */
 #define I40E_DEV_ID_SFP_XL710           0x1572
-#define I40E_DEV_ID_SFP_X710            0x1573
 #define I40E_DEV_ID_QEMU                0x1574
 #define I40E_DEV_ID_KX_A                0x157F
 #define I40E_DEV_ID_KX_B                0x1580
 #define I40E_DEV_ID_KX_C                0x1581
-#define I40E_DEV_ID_KX_D                0x1582
 #define I40E_DEV_ID_QSFP_A              0x1583
 #define I40E_DEV_ID_QSFP_B              0x1584
 #define I40E_DEV_ID_QSFP_C              0x1585
@@ -492,9 +490,6 @@ union i40e_32byte_rx_desc {
         } wb;  /* writeback */
 };
 
-#define I40E_RXD_QW1_STATUS_SHIFT       0
-#define I40E_RXD_QW1_STATUS_MASK        (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
 enum i40e_rx_desc_status_bits {
         /* Note: These are predefined bit offsets */
         I40E_RX_DESC_STATUS_DD_SHIFT            = 0,
@@ -511,9 +506,14 @@ enum i40e_rx_desc_status_bits {
         I40E_RX_DESC_STATUS_LPBK_SHIFT          = 14,
         I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT     = 15,
         I40E_RX_DESC_STATUS_RESERVED_SHIFT      = 16, /* 2 BITS */
-        I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18
+        I40E_RX_DESC_STATUS_UDP_0_SHIFT         = 18,
+        I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
+#define I40E_RXD_QW1_STATUS_SHIFT       0
+#define I40E_RXD_QW1_STATUS_MASK        (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+                                         << I40E_RXD_QW1_STATUS_SHIFT)
+
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT  I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
 #define I40E_RXD_QW1_STATUS_TSYNINDX_MASK       (0x3UL << \
                                              I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
...
...
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
         int severity;
 };
 
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
 /* VF reset states - these are written into the RSTAT register:
  * I40E_VFGEN_RSTAT1 on the PF
  * I40E_VFGEN_RSTAT on the VF
...
...
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
         "Intel(R) XL710 X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.27"
+#define DRV_VERSION "0.9.29"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
         "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -172,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
         struct i40evf_adapter *adapter = netdev_priv(netdev);
 
         adapter->tx_timeout_count++;
-        dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
         if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
                 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
                 schedule_work(&adapter->reset_task);
@@ -662,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
         f = i40evf_find_vlan(adapter, vlan);
         if (NULL == f) {
                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
-                if (NULL == f) {
-                        dev_info(&adapter->pdev->dev,
-                                 "%s: no memory for new VLAN filter\n",
-                                 __func__);
+                if (NULL == f)
                         return NULL;
-                }
+
                 f->vlan = vlan;
 
                 INIT_LIST_HEAD(&f->list);
@@ -771,8 +767,6 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
         if (NULL == f) {
                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
                 if (NULL == f) {
-                        dev_info(&adapter->pdev->dev,
-                                 "%s: no memory for new filter\n", __func__);
                         clear_bit(__I40EVF_IN_CRITICAL_TASK,
                                   &adapter->crit_section);
                         return NULL;
@@ -1034,7 +1028,7 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
         err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                     vector_threshold, vectors);
         if (err < 0) {
-                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
                 kfree(adapter->msix_entries);
                 adapter->msix_entries = NULL;
                 return err;
@@ -1136,9 +1130,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
         v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
         v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
 
-        /* A failure in MSI-X entry allocation isn't fatal, but it does
-         * mean we disable MSI-X capabilities of the adapter.
-         */
         adapter->msix_entries = kcalloc(v_budget,
                                         sizeof(struct msix_entry), GFP_KERNEL);
         if (!adapter->msix_entries) {
@@ -1178,7 +1169,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
                 q_vector->vsi = &adapter->vsi;
                 q_vector->v_idx = q_idx;
                 netif_napi_add(adapter->netdev, &q_vector->napi,
-                               i40evf_napi_poll, 64);
+                               i40evf_napi_poll, NAPI_POLL_WEIGHT);
                 adapter->q_vector[q_idx] = q_vector;
         }
@@ -1332,8 +1323,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
             (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
                 adapter->state = __I40EVF_RESETTING;
                 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
-                dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
-                dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
                 schedule_work(&adapter->reset_task);
                 adapter->aq_pending = 0;
                 adapter->aq_required = 0;
@@ -1496,15 +1486,12 @@ static void i40evf_reset_task(struct work_struct *work)
         for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-                if (rstat_val != I40E_VFR_VFACTIVE) {
-                        dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+                if (rstat_val != I40E_VFR_VFACTIVE)
                         break;
-                } else {
+                else
                         msleep(I40EVF_RESET_WAIT_MS);
-                }
         }
         if (i == I40EVF_RESET_WAIT_COUNT) {
-                dev_err(&adapter->pdev->dev, "Reset was not detected\n");
                 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
                 goto continue_reset; /* act like the reset happened */
         }
@@ -1513,16 +1500,14 @@ static void i40evf_reset_task(struct work_struct *work)
         for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                             I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-                if (rstat_val == I40E_VFR_VFACTIVE) {
-                        dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+                if (rstat_val == I40E_VFR_VFACTIVE)
                         break;
-                } else {
+                else
                         msleep(I40EVF_RESET_WAIT_MS);
-                }
         }
         if (i == I40EVF_RESET_WAIT_COUNT) {
                 /* reset never finished */
-                dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
                         rstat_val);
                 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
@@ -1587,7 +1572,7 @@ static void i40evf_reset_task(struct work_struct *work)
         }
         return;
 reset_err:
-        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
         i40evf_close(adapter->netdev);
 }
@@ -1610,11 +1595,9 @@ static void i40evf_adminq_task(struct work_struct *work)
         event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
         event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
-        if (!event.msg_buf) {
-                dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
-                         __func__);
+        if (!event.msg_buf)
                 return;
-        }
+
         v_msg = (struct i40e_virtchnl_msg *)&event.desc;
         do {
                 ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1868,7 +1851,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
         return;
 err_reinit:
-        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
         i40evf_close(netdev);
 }
@@ -1989,7 +1972,7 @@ static void i40evf_init_task(struct work_struct *work)
                 break;
         case __I40EVF_INIT_VERSION_CHECK:
                 if (!i40evf_asq_done(hw)) {
-                        dev_err(&pdev->dev, "Admin queue command never completed.\n");
+                        dev_err(&pdev->dev, "Admin queue command never completed\n");
                         goto err;
                 }
@@ -2070,7 +2053,7 @@ static void i40evf_init_task(struct work_struct *work)
                 netdev->hw_features &= ~NETIF_F_RXCSUM;
 
         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-                dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
                          adapter->hw.mac.addr);
                 random_ether_addr(adapter->hw.mac.addr);
         }
@@ -2128,7 +2111,7 @@ static void i40evf_init_task(struct work_struct *work)
         netif_tx_stop_all_queues(netdev);
-        dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
         if (netdev->features & NETIF_F_GRO)
                 dev_info(&pdev->dev, "GRO is enabled\n");
@@ -2152,7 +2135,7 @@ static void i40evf_init_task(struct work_struct *work)
 err:
         /* Things went into the weeds, so try again later */
         if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
-                dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+                dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
                 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
                 return; /* do not reschedule */
         }
...
...
@@ -219,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
         len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
                        (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
         vqci = kzalloc(len, GFP_ATOMIC);
-        if (!vqci) {
-                dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
-                        __func__);
+        if (!vqci)
                 return;
-        }
+
         vqci->vsi_id = adapter->vsi_res->vsi_id;
         vqci->num_queue_pairs = pairs;
         vqpi = vqci->qpair;
@@ -332,11 +330,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
               (adapter->num_msix_vectors *
                sizeof(struct i40e_virtchnl_vector_map));
         vimi = kzalloc(len, GFP_ATOMIC);
-        if (!vimi) {
-                dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
-                        __func__);
+        if (!vimi)
                 return;
-        }
+
         vimi->num_vectors = adapter->num_msix_vectors;
         /* Queue vectors first */
@@ -393,7 +388,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
         len = sizeof(struct i40e_virtchnl_ether_addr_list) +
               (count * sizeof(struct i40e_virtchnl_ether_addr));
         if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-                dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+                dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
                          __func__);
                 count = (I40EVF_MAX_AQ_BUF_SIZE -
                          sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -402,11 +397,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
         }
 
         veal = kzalloc(len, GFP_ATOMIC);
-        if (!veal) {
-                dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
-                        __func__);
+        if (!veal)
                 return;
-        }
+
         veal->vsi_id = adapter->vsi_res->vsi_id;
         veal->num_elements = count;
         list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -457,7 +450,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
         len = sizeof(struct i40e_virtchnl_ether_addr_list) +
               (count * sizeof(struct i40e_virtchnl_ether_addr));
         if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-                dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+                dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
                          __func__);
                 count = (I40EVF_MAX_AQ_BUF_SIZE -
                          sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -465,11 +458,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
                 len = I40EVF_MAX_AQ_BUF_SIZE;
         }
         veal = kzalloc(len, GFP_ATOMIC);
-        if (!veal) {
-                dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
-                        __func__);
+        if (!veal)
                 return;
-        }
+
         veal->vsi_id = adapter->vsi_res->vsi_id;
         veal->num_elements = count;
         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
@@ -521,7 +512,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
         len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
               (count * sizeof(u16));
         if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-                dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+                dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
                          __func__);
                 count = (I40EVF_MAX_AQ_BUF_SIZE -
                          sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -529,11 +520,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
                 len = I40EVF_MAX_AQ_BUF_SIZE;
         }
         vvfl = kzalloc(len, GFP_ATOMIC);
-        if (!vvfl) {
-                dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
-                        __func__);
+        if (!vvfl)
                 return;
-        }
+
         vvfl->vsi_id = adapter->vsi_res->vsi_id;
         vvfl->num_elements = count;
         list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -583,7 +572,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
         len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
               (count * sizeof(u16));
         if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-                dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+                dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
                          __func__);
                 count = (I40EVF_MAX_AQ_BUF_SIZE -
                          sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -591,11 +580,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
                 len = I40EVF_MAX_AQ_BUF_SIZE;
         }
         vvfl = kzalloc(len, GFP_ATOMIC);
-        if (!vvfl) {
-                dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
-                        __func__);
+        if (!vvfl)
                 return;
-        }
+
         vvfl->vsi_id = adapter->vsi_res->vsi_id;
         vvfl->num_elements = count;
         list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -724,7 +711,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                 return;
         }
         if (v_opcode != adapter->current_op) {
-                dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+                dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
                         __func__, adapter->current_op, v_opcode);
                 /* We're probably completely screwed at this point, but clear
                  * the current op and try to carry on....
@@ -733,7 +720,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                 return;
         }
         if (v_retval) {
-                dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+                dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
                         __func__, v_retval, v_opcode);
         }
         switch (v_opcode) {
@@ -783,7 +770,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                 adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
                 break;
         default:
-                dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+                dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
                          __func__, v_opcode);
                 break;
         } /* switch v_opcode */
...