Commit 205ed44e authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-03-27

This series contains updates to i40e and i40evf only.

Alex updates the driver code so that we can do bulk updates of the page
reference count instead of just incrementing it by one reference at a
time.  Fixed an issue where we were not resetting skb back to NULL when
we have freed it.  Cleaned up the i40e_process_skb_fields() to align with
other Intel drivers.  Removed FCoE code, since it is not supported in any
of the Fortville/Fortpark hardware, so there is not much point of carrying
the code around, especially if it is broken and untested.

Harshitha fixes a bug in the driver where the calculation of the RSS size
was not taking into account the number of traffic classes enabled.

Robert fixes a potential race condition during VF reset, eliminating
IOMMU DMAR faults caused by the VF hardware when the OS initiates a VF
reset and the VF's settings are modified before the reset has finished.

Bimmy removes a delay that is no longer needed, since it was only needed
for preproduction hardware.

Colin King fixes a null pointer dereference, where the VSI was being
dereferenced before the VSI NULL check.

Jake fixes an issue with the recent addition of the "client code" to the
driver, where we attempt to use an uninitialized variable, so correctly
initialize the params variable by calling i40e_client_get_params().

v2: dropped patch 5 of the original series from Carolyn, since we need
    more documentation and a rationale for the added delay; Carolyn is
    taking the time to update the patch before we re-submit it for
    kernel inclusion.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 402a5bc4 7be147dc
...@@ -235,17 +235,6 @@ config I40E_DCB ...@@ -235,17 +235,6 @@ config I40E_DCB
If unsure, say N. If unsure, say N.
config I40E_FCOE
bool "Fibre Channel over Ethernet (FCoE)"
default n
depends on I40E && DCB && FCOE
---help---
Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
in the driver. This will create new netdev for exclusive FCoE
use with XL710 FCoE offloads enabled.
If unsure, say N.
config I40EVF config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI depends on PCI_MSI
......
...@@ -45,4 +45,3 @@ i40e-objs := i40e_main.o \ ...@@ -45,4 +45,3 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
...@@ -56,9 +56,6 @@ ...@@ -56,9 +56,6 @@
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include "i40e_type.h" #include "i40e_type.h"
#include "i40e_prototype.h" #include "i40e_prototype.h"
#ifdef I40E_FCOE
#include "i40e_fcoe.h"
#endif
#include "i40e_client.h" #include "i40e_client.h"
#include "i40e_virtchnl.h" #include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h" #include "i40e_virtchnl_pf.h"
...@@ -85,10 +82,6 @@ ...@@ -85,10 +82,6 @@
(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64) (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
#define I40E_FDIR_RING 0 #define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32 #define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 256 #define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
...@@ -347,10 +340,6 @@ struct i40e_pf { ...@@ -347,10 +340,6 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num VFs requested for this VF */ u16 num_req_vfs; /* num VFs requested for this VF */
u16 num_vf_qps; /* num queue pairs per VF */ u16 num_vf_qps; /* num queue pairs per VF */
#ifdef I40E_FCOE
u16 num_fcoe_qps; /* num fcoe queues this PF has set up */
u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */
u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */ u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
...@@ -411,9 +400,6 @@ struct i40e_pf { ...@@ -411,9 +400,6 @@ struct i40e_pf {
#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) #define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9) #define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9)
#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) #define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
...@@ -461,10 +447,6 @@ struct i40e_pf { ...@@ -461,10 +447,6 @@ struct i40e_pf {
*/ */
u64 hw_disabled_flags; u64 hw_disabled_flags;
#ifdef I40E_FCOE
struct i40e_fcoe fcoe;
#endif /* I40E_FCOE */
struct i40e_client_instance *cinst; struct i40e_client_instance *cinst;
bool stat_offsets_loaded; bool stat_offsets_loaded;
struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats;
...@@ -520,8 +502,6 @@ struct i40e_pf { ...@@ -520,8 +502,6 @@ struct i40e_pf {
*/ */
u16 dcbx_cap; u16 dcbx_cap;
u32 fcoe_hmc_filt_num;
u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings; struct i40e_filter_control_settings filter_settings;
struct ptp_clock *ptp_clock; struct ptp_clock *ptp_clock;
...@@ -641,11 +621,6 @@ struct i40e_vsi { ...@@ -641,11 +621,6 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets; struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets; struct i40e_eth_stats eth_stats_offsets;
#ifdef I40E_FCOE
struct i40e_fcoe_stats fcoe_stats;
struct i40e_fcoe_stats fcoe_stats_offsets;
bool fcoe_stat_offsets_loaded;
#endif
u32 tx_restart; u32 tx_restart;
u32 tx_busy; u32 tx_busy;
u64 tx_linearize; u64 tx_linearize;
...@@ -918,11 +893,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi); ...@@ -918,11 +893,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1); u16 uplink, u32 param1);
int i40e_vsi_release(struct i40e_vsi *vsi); int i40e_vsi_release(struct i40e_vsi *vsi);
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
void i40e_service_event_schedule(struct i40e_pf *pf); void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len); u8 *msg, u16 len);
...@@ -982,20 +952,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) ...@@ -982,20 +952,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
#ifdef I40E_FCOE
void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *storage);
int i40e_set_mac(struct net_device *netdev, void *p);
void i40e_set_rx_mode(struct net_device *netdev);
#endif
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev);
int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_open(struct net_device *netdev); int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev); int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi); int i40e_vsi_open(struct i40e_vsi *vsi);
...@@ -1009,25 +966,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi, ...@@ -1009,25 +966,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr); int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
int i40e_fcoe_enable(struct net_device *netdev);
int i40e_fcoe_disable(struct net_device *netdev);
int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
void i40e_init_pf_fcoe(struct i40e_pf *pf);
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
struct sk_buff *skb);
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, u8 prog_id);
#endif /* I40E_FCOE */
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB #ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf, void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
......
...@@ -147,6 +147,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) ...@@ -147,6 +147,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
return; return;
} }
memset(&params, 0, sizeof(params));
i40e_client_get_params(vsi, &params);
memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params)); memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client, cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
&params); &params);
......
...@@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) ...@@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
} }
#ifdef I40E_FCOE
/**
* i40e_get_san_mac_addr - get SAN MAC address
* @hw: pointer to the HW structure
* @mac_addr: pointer to SAN MAC address
*
* Reads the adapter's SAN MAC address from NVM
**/
i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
i40e_status status;
u16 flags = 0;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (status)
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
ether_addr_copy(mac_addr, addrs.pf_san_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
return status;
}
#endif
/** /**
* i40e_read_pba_string - Reads part number string from EEPROM * i40e_read_pba_string - Reads part number string from EEPROM
......
...@@ -484,25 +484,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ...@@ -484,25 +484,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->bw_ets_limit_credits[i], vsi->bw_ets_limit_credits[i],
vsi->bw_ets_max_quanta[i]); vsi->bw_ets_max_quanta[i]);
} }
#ifdef I40E_FCOE
if (vsi->type == I40E_VSI_FCOE) {
dev_info(&pf->pdev->dev,
" fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
vsi->fcoe_stats.rx_fcoe_packets,
vsi->fcoe_stats.rx_fcoe_dwords,
vsi->fcoe_stats.rx_fcoe_dropped);
dev_info(&pf->pdev->dev,
" fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
vsi->fcoe_stats.tx_fcoe_packets,
vsi->fcoe_stats.tx_fcoe_dwords);
dev_info(&pf->pdev->dev,
" fcoe_stats: bad_crc = %llu, last_error = %llu\n",
vsi->fcoe_stats.fcoe_bad_fccrc,
vsi->fcoe_stats.fcoe_last_error);
dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n",
vsi->fcoe_stats.fcoe_ddp_count);
}
#endif
} }
/** /**
......
...@@ -162,19 +162,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = { ...@@ -162,19 +162,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
}; };
#ifdef I40E_FCOE
static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
};
#endif /* I40E_FCOE */
#define I40E_QUEUE_STATS_LEN(n) \ #define I40E_QUEUE_STATS_LEN(n) \
(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
* 2 /* Tx and Rx together */ \ * 2 /* Tx and Rx together */ \
...@@ -182,17 +169,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { ...@@ -182,17 +169,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
#ifdef I40E_FCOE
#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats)
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ #define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_FCOE_STATS_LEN + \
I40E_MISC_STATS_LEN + \ I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n))) I40E_QUEUE_STATS_LEN((n)))
#else
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#endif /* I40E_FCOE */
#define I40E_PFC_STATS_LEN ( \ #define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
...@@ -1530,13 +1509,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, ...@@ -1530,13 +1509,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p; sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
} }
#ifdef I40E_FCOE
for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
#endif
rcu_read_lock(); rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) { for (j = 0; j < vsi->num_queue_pairs; j++) {
tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
...@@ -1624,13 +1596,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, ...@@ -1624,13 +1596,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_misc_stats[i].stat_string); i40e_gstrings_misc_stats[i].stat_string);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
} }
#ifdef I40E_FCOE
for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
i40e_gstrings_fcoe_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
#endif
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
......
...@@ -299,11 +299,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf) ...@@ -299,11 +299,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full * device is munged, not just the one netdev port, so go for the full
* reset. * reset.
**/ **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev) static void i40e_tx_timeout(struct net_device *netdev)
#endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
...@@ -408,10 +404,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) ...@@ -408,10 +404,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
* Returns the address of the device statistics structure. * Returns the address of the device statistics structure.
* The statistics are actually updated from the service task. * The statistics are actually updated from the service task.
**/ **/
#ifndef I40E_FCOE static void i40e_get_netdev_stats_struct(struct net_device *netdev,
static
#endif
void i40e_get_netdev_stats_struct(struct net_device *netdev,
struct rtnl_link_stats64 *stats) struct rtnl_link_stats64 *stats)
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
...@@ -723,55 +716,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb) ...@@ -723,55 +716,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
veb->stat_offsets_loaded = true; veb->stat_offsets_loaded = true;
} }
#ifdef I40E_FCOE
/**
* i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
* @vsi: the VSI that is capable of doing FCoE
**/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_fcoe_stats *ofs;
struct i40e_fcoe_stats *fs; /* device's eth stats */
int idx;
if (vsi->type != I40E_VSI_FCOE)
return;
idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
fs = &vsi->fcoe_stats;
ofs = &vsi->fcoe_stats_offsets;
i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->fcoe_last_error, &fs->fcoe_last_error);
i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
vsi->fcoe_stat_offsets_loaded,
&ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
vsi->fcoe_stat_offsets_loaded = true;
}
#endif
/** /**
* i40e_update_vsi_stats - Update the vsi statistics counters. * i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated * @vsi: the VSI to be updated
...@@ -1129,9 +1073,6 @@ void i40e_update_stats(struct i40e_vsi *vsi) ...@@ -1129,9 +1073,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_update_pf_stats(pf); i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi); i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
i40e_update_fcoe_stats(vsi);
#endif
} }
/** /**
...@@ -1562,11 +1503,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) ...@@ -1562,11 +1503,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
* *
* Returns 0 on success, negative on failure * Returns 0 on success, negative on failure
**/ **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p) static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
...@@ -1626,17 +1563,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ...@@ -1626,17 +1563,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
* *
* Setup VSI queue mapping for enabled traffic classes. * Setup VSI queue mapping for enabled traffic classes.
**/ **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc,
bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt, struct i40e_vsi_context *ctxt,
u8 enabled_tc, u8 enabled_tc,
bool is_add) bool is_add)
#endif
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
u16 sections = 0; u16 sections = 0;
...@@ -1686,11 +1616,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, ...@@ -1686,11 +1616,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
qcount = min_t(int, pf->alloc_rss_size, qcount = min_t(int, pf->alloc_rss_size,
num_tc_qps); num_tc_qps);
break; break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
qcount = num_tc_qps;
break;
#endif
case I40E_VSI_FDIR: case I40E_VSI_FDIR:
case I40E_VSI_SRIOV: case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2: case I40E_VSI_VMDQ2:
...@@ -1800,11 +1725,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) ...@@ -1800,11 +1725,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
* i40e_set_rx_mode - NDO callback to set the netdev filters * i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure * @netdev: network interface device structure
**/ **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev) static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
...@@ -2702,13 +2623,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) ...@@ -2702,13 +2623,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
* *
* net_device_ops implementation for adding vlan ids * net_device_ops implementation for adding vlan ids
**/ **/
#ifdef I40E_FCOE
int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev, static int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid) __always_unused __be16 proto, u16 vid)
#endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
...@@ -2739,13 +2655,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, ...@@ -2739,13 +2655,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
* *
* net_device_ops implementation for removing vlan ids * net_device_ops implementation for removing vlan ids
**/ **/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev, static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid) __always_unused __be16 proto, u16 vid)
#endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
...@@ -2915,9 +2826,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) ...@@ -2915,9 +2826,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++) for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
i40e_fcoe_setup_ddp_resources(vsi);
#endif
return err; return err;
} }
...@@ -2937,9 +2845,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) ...@@ -2937,9 +2845,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++) for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]); i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
i40e_fcoe_free_ddp_resources(vsi);
#endif
} }
/** /**
...@@ -3010,9 +2915,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ...@@ -3010,9 +2915,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.qlen = ring->count; tx_ctx.qlen = ring->count;
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED)); I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* FDIR VSI tx ring can still use RS bit and writebacks */ /* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR) if (vsi->type != I40E_VSI_FDIR)
...@@ -3115,9 +3017,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ...@@ -3115,9 +3017,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.l2tsel = 1; rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */ /* this controls whether VLAN is stripped from inner headers */
rx_ctx.showiv = 0; rx_ctx.showiv = 0;
#ifdef I40E_FCOE
rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
/* set the prefena field to 1 because the manual says to */ /* set the prefena field to 1 because the manual says to */
rx_ctx.prefena = 1; rx_ctx.prefena = 1;
...@@ -3184,15 +3083,6 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) ...@@ -3184,15 +3083,6 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
vsi->rx_buf_len = I40E_RXBUFFER_2048; vsi->rx_buf_len = I40E_RXBUFFER_2048;
#ifdef I40E_FCOE
/* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072;
}
#endif /* I40E_FCOE */
/* round up for the chip's needs */ /* round up for the chip's needs */
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
...@@ -3994,11 +3884,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) ...@@ -3994,11 +3884,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
* This is used by netconsole to send skbs without having to re-enable * This is used by netconsole to send skbs without having to re-enable
* interrupts. It's not called while the normal interrupt routine is executing. * interrupts. It's not called while the normal interrupt routine is executing.
**/ **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev) static void i40e_netpoll(struct net_device *netdev)
#endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
...@@ -4101,8 +3987,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) ...@@ -4101,8 +3987,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
} }
} }
if (hw->revision_id == 0)
mdelay(50);
return ret; return ret;
} }
...@@ -4485,14 +4369,6 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) ...@@ -4485,14 +4369,6 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (test_bit(__I40E_DOWN, &vsi->state)) if (test_bit(__I40E_DOWN, &vsi->state))
return; return;
/* No need to disable FCoE VSI when Tx suspended */
if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
vsi->type == I40E_VSI_FCOE) {
dev_dbg(&vsi->back->pdev->dev,
"VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
return;
}
set_bit(__I40E_NEEDS_RESTART, &vsi->state); set_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev)) if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
...@@ -4595,8 +4471,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) ...@@ -4595,8 +4471,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
int v, ret = 0; int v, ret = 0;
for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
/* No need to wait for FCoE VSI queues */ if (pf->vsi[v]) {
if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret) if (ret)
break; break;
...@@ -5220,20 +5095,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) ...@@ -5220,20 +5095,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
continue; continue;
/* - Enable all TCs for the LAN VSI /* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
* - For FCoE VSI only enable the TC configured
* as per the APP TLV
#endif
* - For all others keep them at TC0 for now * - For all others keep them at TC0 for now
*/ */
if (v == pf->lan_vsi) if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf); tc_map = i40e_pf_get_tc_map(pf);
else else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS; tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */
ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
if (ret) { if (ret) {
...@@ -5597,13 +5464,8 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) ...@@ -5597,13 +5464,8 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
return ret; return ret;
} }
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc) struct tc_to_netdev *tc)
#endif
{ {
if (tc->type != TC_SETUP_MQPRIO) if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL; return -EINVAL;
...@@ -6316,9 +6178,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) ...@@ -6316,9 +6178,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
switch (vsi->type) { switch (vsi->type) {
case I40E_VSI_MAIN: case I40E_VSI_MAIN:
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
#endif
if (!vsi->netdev || !vsi->netdev_registered) if (!vsi->netdev || !vsi->netdev_registered)
break; break;
...@@ -7100,8 +6959,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) ...@@ -7100,8 +6959,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset; goto end_core_reset;
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, hw->func_caps.num_rx_qp, 0, 0);
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
if (ret) { if (ret) {
dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
goto end_core_reset; goto end_core_reset;
...@@ -7120,10 +6978,6 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) ...@@ -7120,10 +6978,6 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* Continue without DCB enabled */ /* Continue without DCB enabled */
} }
#endif /* CONFIG_I40E_DCB */ #endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
i40e_init_pf_fcoe(pf);
#endif
/* do basic switch setup */ /* do basic switch setup */
ret = i40e_setup_pf_switch(pf, reinit); ret = i40e_setup_pf_switch(pf, reinit);
if (ret) if (ret)
...@@ -7528,15 +7382,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) ...@@ -7528,15 +7382,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
I40E_REQ_DESCRIPTOR_MULTIPLE); I40E_REQ_DESCRIPTOR_MULTIPLE);
break; break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
vsi->alloc_queue_pairs = pf->num_fcoe_qps;
vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
I40E_REQ_DESCRIPTOR_MULTIPLE);
vsi->num_q_vectors = pf->num_fcoe_msix;
break;
#endif /* I40E_FCOE */
default: default:
WARN_ON(1); WARN_ON(1);
return -ENODATA; return -ENODATA;
...@@ -7872,9 +7717,6 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -7872,9 +7717,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
* - assumes symmetric Tx/Rx pairing * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs * - The number of VMDq pairs
* - The CPU count within the NUMA node if iWARP is enabled * - The CPU count within the NUMA node if iWARP is enabled
#ifdef I40E_FCOE
* - The number of FCOE qps.
#endif
* Once we count this up, try the request. * Once we count this up, try the request.
* *
* If we can't get what we want, we'll simplify to nearly nothing * If we can't get what we want, we'll simplify to nearly nothing
...@@ -7911,20 +7753,6 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -7911,20 +7753,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
} }
} }
#ifdef I40E_FCOE
/* can we reserve enough for FCoE? */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
if (!vectors_left)
pf->num_fcoe_msix = 0;
else if (vectors_left >= pf->num_fcoe_qps)
pf->num_fcoe_msix = pf->num_fcoe_qps;
else
pf->num_fcoe_msix = 1;
v_budget += pf->num_fcoe_msix;
vectors_left -= pf->num_fcoe_msix;
}
#endif
/* can we reserve enough for iWARP? */ /* can we reserve enough for iWARP? */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) { if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
iwarp_requested = pf->num_iwarp_msix; iwarp_requested = pf->num_iwarp_msix;
...@@ -8018,10 +7846,6 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -8018,10 +7846,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1; pf->num_vmdq_vsis = 1;
pf->num_vmdq_qps = 1; pf->num_vmdq_qps = 1;
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
pf->num_fcoe_msix = 0;
#endif
/* partition out the remaining vectors */ /* partition out the remaining vectors */
switch (vec) { switch (vec) {
...@@ -8035,13 +7859,6 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -8035,13 +7859,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else { } else {
pf->num_lan_msix = 2; pf->num_lan_msix = 2;
} }
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_lan_msix = 1;
pf->num_fcoe_msix = 1;
}
#endif
break; break;
default: default:
if (pf->flags & I40E_FLAG_IWARP_ENABLED) { if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
...@@ -8061,13 +7878,6 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -8061,13 +7878,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
pf->num_lan_msix); pf->num_lan_msix);
pf->num_lan_qps = pf->num_lan_msix; pf->num_lan_qps = pf->num_lan_msix;
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_fcoe_msix = 1;
vec--;
}
#endif
break; break;
} }
} }
...@@ -8088,13 +7898,6 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -8088,13 +7898,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_IWARP_ENABLED; pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
} }
#ifdef I40E_FCOE
if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
i40e_debug(&pf->hw, I40E_DEBUG_INIT, i40e_debug(&pf->hw, I40E_DEBUG_INIT,
"MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
pf->num_lan_msix, pf->num_lan_msix,
...@@ -8193,9 +7996,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) ...@@ -8193,9 +7996,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (vectors < 0) { if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_IWARP_ENABLED | I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
I40E_FLAG_RSS_ENABLED | I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_DCB_ENABLED | I40E_FLAG_DCB_ENABLED |
...@@ -8577,9 +8377,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) ...@@ -8577,9 +8377,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
/* Determine the RSS size of the VSI */ /* Determine the RSS size of the VSI */
if (!vsi->rss_size) if (!vsi->rss_size) {
vsi->rss_size = min_t(int, pf->alloc_rss_size, u16 qcount;
vsi->num_queue_pairs);
qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
}
if (!vsi->rss_size) if (!vsi->rss_size)
return -EINVAL; return -EINVAL;
...@@ -8625,6 +8428,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) ...@@ -8625,6 +8428,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
new_rss_size = min_t(int, queue_count, pf->rss_size_max); new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) { if (queue_count != vsi->num_queue_pairs) {
u16 qcount;
vsi->req_queue_pairs = queue_count; vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf); i40e_prep_for_reset(pf);
...@@ -8642,8 +8447,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) ...@@ -8642,8 +8447,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
} }
/* Reset vsi->rss_size, as number of enabled queues changed */ /* Reset vsi->rss_size, as number of enabled queues changed */
vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
vsi->num_queue_pairs); vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
i40e_pf_config_rss(pf); i40e_pf_config_rss(pf);
} }
...@@ -8876,10 +8681,6 @@ static int i40e_sw_init(struct i40e_pf *pf) ...@@ -8876,10 +8681,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_iwarp_msix = (int)num_online_cpus() + 1; pf->num_iwarp_msix = (int)num_online_cpus() + 1;
} }
#ifdef I40E_FCOE
i40e_init_pf_fcoe(pf);
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
...@@ -9406,10 +9207,6 @@ static const struct net_device_ops i40e_netdev_ops = { ...@@ -9406,10 +9207,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll, .ndo_poll_controller = i40e_netpoll,
#endif #endif
.ndo_setup_tc = __i40e_setup_tc, .ndo_setup_tc = __i40e_setup_tc,
#ifdef I40E_FCOE
.ndo_fcoe_enable = i40e_fcoe_enable,
.ndo_fcoe_disable = i40e_fcoe_disable,
#endif
.ndo_set_features = i40e_set_features, .ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
...@@ -9543,9 +9340,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ...@@ -9543,9 +9340,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->netdev_ops = &i40e_netdev_ops; netdev->netdev_ops = &i40e_netdev_ops;
netdev->watchdog_timeo = 5 * HZ; netdev->watchdog_timeo = 5 * HZ;
i40e_set_ethtool_ops(netdev); i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
i40e_fcoe_config_netdev(netdev, vsi);
#endif
/* MTU range: 68 - 9706 */ /* MTU range: 68 - 9706 */
netdev->min_mtu = ETH_MIN_MTU; netdev->min_mtu = ETH_MIN_MTU;
...@@ -9769,16 +9563,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -9769,16 +9563,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break; break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
ret = i40e_fcoe_vsi_init(vsi, &ctxt);
if (ret) {
dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
return ret;
}
break;
#endif /* I40E_FCOE */
case I40E_VSI_IWARP: case I40E_VSI_IWARP:
/* send down message to iWARP */ /* send down message to iWARP */
break; break;
...@@ -10195,7 +9979,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ...@@ -10195,7 +9979,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
} }
} }
case I40E_VSI_VMDQ2: case I40E_VSI_VMDQ2:
case I40E_VSI_FCOE:
ret = i40e_config_netdev(vsi); ret = i40e_config_netdev(vsi);
if (ret) if (ret)
goto err_netdev; goto err_netdev;
...@@ -10855,9 +10638,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) ...@@ -10855,9 +10638,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
int queues_left; int queues_left;
pf->num_lan_qps = 0; pf->num_lan_qps = 0;
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
#endif
/* Find the max queues to be put into basic use. We'll always be /* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the * using TC0, whether or not DCB is running, and TC0 will get the
...@@ -10874,9 +10654,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) ...@@ -10874,9 +10654,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */ /* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED | pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED | I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_CAPABLE |
...@@ -10893,9 +10670,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) ...@@ -10893,9 +10670,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED | pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_IWARP_ENABLED | I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED | I40E_FLAG_DCB_ENABLED |
...@@ -10916,22 +10690,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) ...@@ -10916,22 +10690,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps; queues_left -= pf->num_lan_qps;
} }
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
if (I40E_DEFAULT_FCOE <= queues_left) {
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
} else if (I40E_MINIMUM_FCOE <= queues_left) {
pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
} else {
pf->num_fcoe_qps = 0;
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
}
queues_left -= pf->num_fcoe_qps;
}
#endif
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (queues_left > 1) { if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */ queues_left -= 1; /* save 1 queue for FD */
...@@ -10963,9 +10721,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) ...@@ -10963,9 +10721,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
queues_left); queues_left);
#ifdef I40E_FCOE
dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
} }
/** /**
...@@ -11032,10 +10787,6 @@ static void i40e_print_features(struct i40e_pf *pf) ...@@ -11032,10 +10787,6 @@ static void i40e_print_features(struct i40e_pf *pf)
i += snprintf(&buf[i], REMAIN(i), " Geneve"); i += snprintf(&buf[i], REMAIN(i), " Geneve");
if (pf->flags & I40E_FLAG_PTP) if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP"); i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " FCOE");
#endif
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " VEB"); i += snprintf(&buf[i], REMAIN(i), " VEB");
else else
...@@ -11253,8 +11004,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11253,8 +11004,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} }
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, hw->func_caps.num_rx_qp, 0, 0);
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
if (err) { if (err) {
dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
goto err_init_lan_hmc; goto err_init_lan_hmc;
...@@ -11289,18 +11039,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11289,18 +11039,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_get_port_mac_addr(hw, hw->mac.port_addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr)) if (is_valid_ether_addr(hw->mac.port_addr))
pf->flags |= I40E_FLAG_PORT_ID_VALID; pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
if (err)
dev_info(&pdev->dev,
"(non-fatal) SAN MAC retrieval failed: %d\n", err);
if (!is_valid_ether_addr(hw->mac.san_addr)) {
dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
hw->mac.san_addr);
ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
}
dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */
pci_set_drvdata(pdev, pf); pci_set_drvdata(pdev, pf);
pci_save_state(pdev); pci_save_state(pdev);
...@@ -11496,11 +11234,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11496,11 +11234,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
err); err);
#ifdef I40E_FCOE
/* create FCoE interface */
i40e_fcoe_vsi_setup(pf);
#endif
#define PCI_SPEED_SIZE 8 #define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8 #define PCI_WIDTH_SIZE 8
/* Devices on the IOSF bus do not have this information /* Devices on the IOSF bus do not have this information
......
...@@ -78,7 +78,4 @@ do { \ ...@@ -78,7 +78,4 @@ do { \
} while (0) } while (0)
typedef enum i40e_status_code i40e_status; typedef enum i40e_status_code i40e_status;
#ifdef CONFIG_I40E_FCOE
#define I40E_FCOE
#endif
#endif /* _I40E_OSDEP_H_ */ #endif /* _I40E_OSDEP_H_ */
...@@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, ...@@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size); u32 pba_num_size);
i40e_status i40e_validate_mac_addr(u8 *mac_addr); i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
#ifdef I40E_FCOE
i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
#endif
/* prototype for functions used for NVM access */ /* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw, i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
......
...@@ -1062,11 +1062,6 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, ...@@ -1062,11 +1062,6 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, rx_desc, id); i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
(id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
} }
/** /**
...@@ -1154,7 +1149,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -1154,7 +1149,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
PAGE_SIZE, PAGE_SIZE,
DMA_FROM_DEVICE, DMA_FROM_DEVICE,
I40E_RX_DMA_ATTR); I40E_RX_DMA_ATTR);
__free_pages(rx_bi->page, 0); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL; rx_bi->page = NULL;
rx_bi->page_offset = 0; rx_bi->page_offset = 0;
...@@ -1299,6 +1294,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -1299,6 +1294,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = 0; bi->page_offset = 0;
bi->pagecnt_bias = 1;
return true; return true;
} }
...@@ -1391,8 +1387,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -1391,8 +1387,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* @vsi: the VSI we care about * @vsi: the VSI we care about
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor * @rx_desc: the receive descriptor
*
* skb->protocol must be set before this function is called
**/ **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi, static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb, struct sk_buff *skb,
...@@ -1554,12 +1548,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, ...@@ -1554,12 +1548,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index); skb_record_rx_queue(skb, rx_ring->queue_index);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
} }
/** /**
...@@ -1604,7 +1598,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, ...@@ -1604,7 +1598,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */ /* transfer page from old buffer to new buffer */
*new_buff = *old_buff; new_buff->dma = old_buff->dma;
new_buff->page = old_buff->page;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
} }
/** /**
...@@ -1656,6 +1653,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1656,6 +1653,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE >= 8192) #if (PAGE_SIZE >= 8192)
unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif #endif
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
/* Is any reuse possible? */ /* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page))) if (unlikely(!i40e_page_is_reusable(page)))
...@@ -1663,7 +1661,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1663,7 +1661,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */ /* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != 1)) if (unlikely(page_count(page) != pagecnt_bias))
return false; return false;
/* flip page offset to other buffer */ /* flip page offset to other buffer */
...@@ -1676,9 +1674,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1676,9 +1674,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
return false; return false;
#endif #endif
/* Inc ref count on page before passing it up to the stack */ /* If we have drained the page fragment pool we need to update
get_page(page); * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
return true; return true;
} }
...@@ -1725,7 +1728,6 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, ...@@ -1725,7 +1728,6 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
return true; return true;
/* this page cannot be reused so discard it */ /* this page cannot be reused so discard it */
__free_pages(page, 0);
return false; return false;
} }
...@@ -1819,6 +1821,8 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1819,6 +1821,8 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
/* we are not reusing the buffer so unmap it */ /* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
} }
/* clear contents of buffer_info */ /* clear contents of buffer_info */
...@@ -1930,6 +1934,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1930,6 +1934,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/ */
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
skb = NULL;
continue; continue;
} }
...@@ -1948,15 +1953,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1948,15 +1953,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* populate checksum, VLAN, and protocol */ /* populate checksum, VLAN, and protocol */
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
#ifdef I40E_FCOE
if (unlikely(
i40e_rx_is_fcoe(rx_ptype) &&
!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
dev_kfree_skb_any(skb);
continue;
}
#endif
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
...@@ -2332,15 +2328,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2332,15 +2328,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* Returns error code indicate the frame should be dropped upon error and the * Returns error code indicate the frame should be dropped upon error and the
* otherwise returns 0 to indicate the flags has been set properly. * otherwise returns 0 to indicate the flags has been set properly.
**/ **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, struct i40e_ring *tx_ring,
u32 *flags) u32 *flags)
#endif
{ {
__be16 protocol = skb->protocol; __be16 protocol = skb->protocol;
u32 tx_flags = 0; u32 tx_flags = 0;
...@@ -2848,15 +2838,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb) ...@@ -2848,15 +2838,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* @td_cmd: the command field in the descriptor * @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc * @td_offset: offset for checksum or crc
**/ **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags, struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset) const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{ {
unsigned int data_len = skb->data_len; unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb); unsigned int size = skb_headlen(skb);
......
...@@ -258,7 +258,12 @@ struct i40e_tx_buffer { ...@@ -258,7 +258,12 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer { struct i40e_rx_buffer {
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
unsigned int page_offset; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
#else
__u16 page_offset;
#endif
__u16 pagecnt_bias;
}; };
struct i40e_queue_stats { struct i40e_queue_stats {
...@@ -396,13 +401,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring); ...@@ -396,13 +401,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring); void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring); void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget); int i40e_napi_poll(struct napi_struct *napi, int budget);
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
...@@ -485,16 +483,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) ...@@ -485,16 +483,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
return count != I40E_MAX_BUFFER_TXD; return count != I40E_MAX_BUFFER_TXD;
} }
/**
* i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
* @ptype: the packet type field from Rx descriptor write-back
**/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
/** /**
* txring_txq - Find the netdev Tx ring based on the i40e Tx ring * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of * @ring: Tx ring to find the netdev equivalent of
......
...@@ -1213,25 +1213,6 @@ struct i40e_veb_tc_stats { ...@@ -1213,25 +1213,6 @@ struct i40e_veb_tc_stats {
u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
}; };
#ifdef I40E_FCOE
/* Statistics collected per function for FCoE */
struct i40e_fcoe_stats {
u64 rx_fcoe_packets; /* fcoeprc */
u64 rx_fcoe_dwords; /* focedwrc */
u64 rx_fcoe_dropped; /* fcoerpdc */
u64 tx_fcoe_packets; /* fcoeptc */
u64 tx_fcoe_dwords; /* focedwtc */
u64 fcoe_bad_fccrc; /* fcoecrc */
u64 fcoe_last_error; /* fcoelast */
u64 fcoe_ddp_count; /* fcoeddpc */
};
/* offset to per function FCoE statistics block */
#define I40E_FCOE_VF_STAT_OFFSET 0
#define I40E_FCOE_PF_STAT_OFFSET 128
#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
#endif
/* Statistics collected by the MAC */ /* Statistics collected by the MAC */
struct i40e_hw_port_stats { struct i40e_hw_port_stats {
/* eth stats collected by the port */ /* eth stats collected by the port */
...@@ -1319,125 +1300,6 @@ struct i40e_hw_port_stats { ...@@ -1319,125 +1300,6 @@ struct i40e_hw_port_stats {
#define I40E_SRRD_SRCTL_ATTEMPTS 100000 #define I40E_SRRD_SRCTL_ATTEMPTS 100000
#ifdef I40E_FCOE
/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
enum i40E_fcoe_tx_ctx_desc_cmd_bits {
I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
};
/* FCoE DDP Context descriptor */
struct i40e_fcoe_ddp_context_desc {
__le64 rsvd;
__le64 type_cmd_foff_lsize;
};
#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
};
#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
/* FCoE DDP/DWO Queue Context descriptor */
struct i40e_fcoe_queue_context_desc {
__le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
__le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
};
#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
enum i40e_fcoe_queue_ctx_desc_tph_bits {
I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
};
#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
/* FCoE DDP/DWO Filter Context descriptor */
struct i40e_fcoe_filter_context_desc {
__le32 param;
__le16 seqn;
/* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
__le16 rsvd_dmaindx;
/* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
__le64 flags_rsvd_lanq;
};
#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
enum i40e_fcoe_filter_ctx_desc_flags_bits {
I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
};
#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
#endif /* I40E_FCOE */
enum i40e_switch_element_types { enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1, I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2, I40E_SWITCH_ELEMENT_TYPE_PF = 2,
......
...@@ -809,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf) ...@@ -809,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
u32 reg_idx, reg; u32 reg_idx, reg;
int i, msix_vf; int i, msix_vf;
/* Start by disabling VF's configuration API to prevent the OS from
* accessing the VF's VSI after it's freed / invalidated.
*/
clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
/* free vsi & disconnect it from the parent uplink */ /* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_idx) { if (vf->lan_vsi_idx) {
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
...@@ -848,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf) ...@@ -848,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
/* reset some of the state variables keeping track of the resources */ /* reset some of the state variables keeping track of the resources */
vf->num_queue_pairs = 0; vf->num_queue_pairs = 0;
vf->vf_states = 0; vf->vf_states = 0;
clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
} }
/** /**
...@@ -939,6 +943,14 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -939,6 +943,14 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* warn the VF */ /* warn the VF */
clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
* It's normally disabled in i40e_free_vf_res(), but it's safer
* to do it earlier to give some time to finish to any VF config
* functions that may still be running at this point.
*/
clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we /* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register. * just need to clean up, so don't hit the VFRTRIG register.
*/ */
...@@ -982,11 +994,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -982,11 +994,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (!rsd) if (!rsd)
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id); vf->vf_id);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
/* clear the reset bit in the VPGEN_VFRTRIG reg */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
/* On initial reset, we won't have any queues */ /* On initial reset, we won't have any queues */
if (vf->lan_vsi_idx == 0) if (vf->lan_vsi_idx == 0)
...@@ -994,8 +1001,24 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -994,8 +1001,24 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
complete_reset: complete_reset:
/* reallocate VF resources to reset the VSI state */ /* free VF resources to begin resetting the VSI state */
i40e_free_vf_res(vf); i40e_free_vf_res(vf);
/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
* By doing this we allow HW to access VF memory at any point. If we
* did it any sooner, HW could access memory while it was being freed
* in i40e_free_vf_res(), causing an IOMMU fault.
*
* On the other hand, this needs to be done ASAP, because the VF driver
* is waiting for this to happen and may report a timeout. It's
* harmless, but it gets logged into Guest OS kernel log, so best avoid
* it.
*/
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
/* reallocate VF resources to finish resetting the VSI state */
if (!i40e_alloc_vf_res(vf)) { if (!i40e_alloc_vf_res(vf)) {
int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_enable_vf_mappings(vf); i40e_enable_vf_mappings(vf);
...@@ -1006,7 +1029,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -1006,7 +1029,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_notify_client_of_vf_reset(pf, abs_vf_id); i40e_notify_client_of_vf_reset(pf, abs_vf_id);
vf->num_vlan = 0; vf->num_vlan = 0;
} }
/* tell the VF the reset is done */
/* Tell the VF driver the reset is done. This needs to be done only
* after VF has been fully initialized, because the VF driver may
* request resources immediately after setting this flag.
*/
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
i40e_flush(hw); i40e_flush(hw);
......
...@@ -87,7 +87,6 @@ struct i40e_vf { ...@@ -87,7 +87,6 @@ struct i40e_vf {
u16 stag; u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr; struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id; u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */ bool pf_set_mac; /* The VMM admin set the VF MAC address */
bool trusted; bool trusted;
......
...@@ -526,7 +526,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -526,7 +526,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
PAGE_SIZE, PAGE_SIZE,
DMA_FROM_DEVICE, DMA_FROM_DEVICE,
I40E_RX_DMA_ATTR); I40E_RX_DMA_ATTR);
__free_pages(rx_bi->page, 0); __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL; rx_bi->page = NULL;
rx_bi->page_offset = 0; rx_bi->page_offset = 0;
...@@ -671,6 +671,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, ...@@ -671,6 +671,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma; bi->dma = dma;
bi->page = page; bi->page = page;
bi->page_offset = 0; bi->page_offset = 0;
bi->pagecnt_bias = 1;
return true; return true;
} }
...@@ -763,8 +764,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) ...@@ -763,8 +764,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* @vsi: the VSI we care about * @vsi: the VSI we care about
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor * @rx_desc: the receive descriptor
*
* skb->protocol must be set before this function is called
**/ **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi, static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb, struct sk_buff *skb,
...@@ -916,12 +915,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring, ...@@ -916,12 +915,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
{ {
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index); skb_record_rx_queue(skb, rx_ring->queue_index);
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
} }
/** /**
...@@ -966,7 +965,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, ...@@ -966,7 +965,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */ /* transfer page from old buffer to new buffer */
*new_buff = *old_buff; new_buff->dma = old_buff->dma;
new_buff->page = old_buff->page;
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
} }
/** /**
...@@ -1018,6 +1020,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1018,6 +1020,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE >= 8192) #if (PAGE_SIZE >= 8192)
unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif #endif
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
/* Is any reuse possible? */ /* Is any reuse possible? */
if (unlikely(!i40e_page_is_reusable(page))) if (unlikely(!i40e_page_is_reusable(page)))
...@@ -1025,7 +1028,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1025,7 +1028,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */ /* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != 1)) if (unlikely(page_count(page) != pagecnt_bias))
return false; return false;
/* flip page offset to other buffer */ /* flip page offset to other buffer */
...@@ -1038,8 +1041,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, ...@@ -1038,8 +1041,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
return false; return false;
#endif #endif
/* Inc ref count on page before passing it up to the stack */ /* If we have drained the page fragment pool we need to update
get_page(page); * the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
return true; return true;
} }
...@@ -1087,7 +1096,6 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, ...@@ -1087,7 +1096,6 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
return true; return true;
/* this page cannot be reused so discard it */ /* this page cannot be reused so discard it */
__free_pages(page, 0);
return false; return false;
} }
...@@ -1181,6 +1189,8 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, ...@@ -1181,6 +1189,8 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
/* we are not reusing the buffer so unmap it */ /* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
__page_frag_cache_drain(rx_buffer->page,
rx_buffer->pagecnt_bias);
} }
/* clear contents of buffer_info */ /* clear contents of buffer_info */
...@@ -1287,6 +1297,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1287,6 +1297,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/ */
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
skb = NULL;
continue; continue;
} }
......
...@@ -244,7 +244,12 @@ struct i40e_tx_buffer { ...@@ -244,7 +244,12 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer { struct i40e_rx_buffer {
dma_addr_t dma; dma_addr_t dma;
struct page *page; struct page *page;
unsigned int page_offset; #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
#else
__u16 page_offset;
#endif
__u16 pagecnt_bias;
}; };
struct i40e_queue_stats { struct i40e_queue_stats {
...@@ -463,19 +468,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) ...@@ -463,19 +468,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */ /* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD; return count != I40E_MAX_BUFFER_TXD;
} }
/**
* i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
* @ptype: the packet type field from Rx descriptor write-back
**/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
/** /**
* txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of * @ring: Tx ring to find the netdev equivalent of
**/ **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring) static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
......
...@@ -34,12 +34,12 @@ static struct i40e_ops i40evf_lan_ops = { ...@@ -34,12 +34,12 @@ static struct i40e_ops i40evf_lan_ops = {
**/ **/
void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
{ {
struct i40evf_adapter *adapter = vsi->back; struct i40e_client_instance *cinst;
struct i40e_client_instance *cinst = adapter->cinst;
if (!vsi) if (!vsi)
return; return;
cinst = vsi->back->cinst;
if (!cinst || !cinst->client || !cinst->client->ops || if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->virtchnl_receive) { !cinst->client->ops->virtchnl_receive) {
dev_dbg(&vsi->back->pdev->dev, dev_dbg(&vsi->back->pdev->dev,
...@@ -58,12 +58,13 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len) ...@@ -58,12 +58,13 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
**/ **/
void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
{ {
struct i40evf_adapter *adapter = vsi->back; struct i40e_client_instance *cinst;
struct i40e_client_instance *cinst = adapter->cinst;
struct i40e_params params; struct i40e_params params;
if (!vsi) if (!vsi)
return; return;
cinst = vsi->back->cinst;
memset(&params, 0, sizeof(params)); memset(&params, 0, sizeof(params));
params.mtu = vsi->netdev->mtu; params.mtu = vsi->netdev->mtu;
params.link_up = vsi->back->link_up; params.link_up = vsi->back->link_up;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment