Commit 205ed44e authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-03-27

This series contains updates to i40e and i40evf only.

Alex updates the driver code so that we can do bulk updates of the page
reference count instead of incrementing it by one reference at a time.
He also fixed an issue where we were not resetting skb back to NULL
after freeing it, cleaned up i40e_process_skb_fields() to align it with
other Intel drivers, and removed the FCoE code, since FCoE is not
supported on any Fortville/Fortpark hardware and there is little point
in carrying the code around, especially when it is broken and untested.
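
The reference-count scheme is easiest to see in miniature. Below is a
compilable user-space sketch of the pagecnt_bias idea; every name in it
is illustrative, not the driver's actual API, it only models the
bookkeeping the series introduces:

  /* Instead of a per-frame get_page(), stockpile a large batch of page
   * references once and spend them by decrementing a local bias counter.
   */
  #include <limits.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct page_model {
          unsigned int refcount;          /* models page_count(page) */
  };

  struct rx_buffer_model {
          struct page_model *page;
          unsigned short pagecnt_bias;    /* refs still owned by the driver */
  };

  static void alloc_buffer(struct rx_buffer_model *b, struct page_model *p)
  {
          p->refcount = 1;                /* one reference at allocation */
          b->page = p;
          b->pagecnt_bias = 1;
  }

  /* Called once per received frame: hand one stockpiled reference to the
   * stack by decrementing the bias instead of touching the refcount.
   */
  static bool reuse_buffer(struct rx_buffer_model *b)
  {
          unsigned int pagecnt_bias = b->pagecnt_bias--;

          /* Reusable only if every remaining reference is ours. */
          if (b->page->refcount != pagecnt_bias)
                  return false;

          /* Pool drained: restock a big batch in one shot. */
          if (pagecnt_bias == 1) {
                  b->page->refcount += USHRT_MAX; /* models page_ref_add() */
                  b->pagecnt_bias = USHRT_MAX;
          }
          return true;
  }

  int main(void)
  {
          struct page_model page;
          struct rx_buffer_model buf;

          alloc_buffer(&buf, &page);
          for (int i = 0; i < 200000; i++) {
                  if (!reuse_buffer(&buf))
                          return 1;
                  page.refcount--; /* stack consumed the frame, dropped its ref */
          }
          printf("bias=%u refs=%u\n", buf.pagecnt_bias, page.refcount);
          return 0;
  }

The payoff is that a page recycles through tens of thousands of frames
while the real (atomic) page refcount is touched only once per
USHRT_MAX uses.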

Harshitha fixes a bug in the driver where the calculation of the RSS size
was not taking into account the number of traffic classes enabled.
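
Roughly, the arithmetic of the bug (illustrative numbers, not taken from
the patch): with DCB, the queue pairs are partitioned across the enabled
traffic classes, so an RSS lookup table spread over the total queue
count can steer hash buckets at queues belonging to another TC. A
hedged sketch:

  #include <stdio.h>

  int main(void)
  {
          unsigned int num_queue_pairs = 64;   /* illustrative */
          unsigned int num_enabled_tcs = 4;    /* illustrative */
          unsigned char lut[128];              /* RSS indirection table */

          /* buggy sizing: ignores TC boundaries entirely */
          unsigned int rss_size_buggy = num_queue_pairs;

          /* fixed sizing: RSS spreads only across one TC's queue share */
          unsigned int rss_size = num_queue_pairs / num_enabled_tcs;

          /* fill the indirection table round-robin over usable queues */
          for (unsigned int i = 0; i < sizeof(lut); i++)
                  lut[i] = (unsigned char)(i % rss_size);

          printf("buggy span=%u corrected span=%u lut[last]=%u\n",
                 rss_size_buggy, rss_size, lut[sizeof(lut) - 1]);
          return 0;
  }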

Robert fixes a potential race condition during VF reset that caused
IOMMU DMAR faults: the VF hardware could access memory that was being
freed, and the OS could modify the VF's settings after initiating a VF
reset but before the reset had finished.
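
The ordering that resolves the race looks roughly like the toy model
below; the real code operates on struct i40e_vf and device registers
(see the i40e_virtchnl_pf.c hunks further down), and the flags here are
stand-ins, not driver fields:

  #include <stdbool.h>
  #include <stdio.h>

  struct toy_vf {
          bool config_api_enabled; /* models I40E_VF_STAT_INIT */
          bool hw_held_in_reset;   /* models VPGEN_VFRTRIG.VFSWR */
          bool resources_valid;    /* models the VSI / DMA buffers */
  };

  static void toy_reset_vf(struct toy_vf *vf)
  {
          /* 1. Stop the OS from configuring the VF mid-reset. */
          vf->config_api_enabled = false;

          /* 2. Hold the HW in reset, then free the VF's resources while
           *    DMA is still fenced off.
           */
          vf->hw_held_in_reset = true;
          vf->resources_valid = false;

          /* 3. Only now release the reset bit; releasing it before step 2
           *    completed let the HW DMA into freed memory -> DMAR fault.
           */
          vf->hw_held_in_reset = false;

          /* 4. Reallocate and re-enable the configuration API last. */
          vf->resources_valid = true;
          vf->config_api_enabled = true;
  }

  int main(void)
  {
          struct toy_vf vf = { true, false, true };

          toy_reset_vf(&vf);
          printf("api=%d hw_reset=%d res=%d\n", vf.config_api_enabled,
                 vf.hw_held_in_reset, vf.resources_valid);
          return 0;
  }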

Bimmy removes a delay that is no longer needed, since it was only needed
for preproduction hardware.

Colin King fixes a NULL pointer dereference, where the VSI was being
dereferenced before the VSI NULL check.
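
The shape of that class of bug, in a self-contained miniature (the
struct and field names only echo the driver, they are not its
definitions):

  #include <stddef.h>
  #include <stdio.h>

  struct vsi { int back; };

  /* buggy: the dereference runs before the guard, so the guard is dead */
  static int use_vsi_buggy(struct vsi *vsi)
  {
          int b = vsi->back;      /* dereference happens here */
          if (!vsi)               /* too late: never reached on NULL */
                  return -1;
          return b;
  }

  /* fixed: hoist the NULL check above every dereference */
  static int use_vsi_fixed(struct vsi *vsi)
  {
          if (!vsi)
                  return -1;
          return vsi->back;
  }

  int main(void)
  {
          struct vsi v = { 42 };

          printf("%d %d\n", use_vsi_fixed(&v), use_vsi_fixed(NULL));
          (void)use_vsi_buggy; /* calling it with NULL would crash */
          return 0;
  }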

Jake fixes an issue with the recent addition of the "client code" to the
driver, where we attempted to use an uninitialized variable; the fix is
to correctly initialize the params variable by calling
i40e_client_get_params().
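
A minimal model of the fix, with made-up field names standing in for
struct i40e_params: zero the stack variable and fill it before it is
copied anywhere:

  #include <stdio.h>
  #include <string.h>

  struct params_model { unsigned int mtu; int link_up; };

  /* stands in for i40e_client_get_params() */
  static void get_params_model(struct params_model *p)
  {
          p->mtu = 1500;
          p->link_up = 1;
  }

  int main(void)
  {
          struct params_model params;         /* uninitialized stack memory */
          struct params_model lan_info_params;

          memset(&params, 0, sizeof(params)); /* the missing step */
          get_params_model(&params);

          /* only now is the copy safe */
          memcpy(&lan_info_params, &params, sizeof(params));
          printf("mtu=%u link=%d\n", lan_info_params.mtu,
                 lan_info_params.link_up);
          return 0;
  }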

v2: dropped patch 5 of the original series from Carolyn since we need
    more documentation and a better explanation of why the delay is
    needed, so Carolyn is taking the time to update the patch before we
    re-submit it for kernel inclusion.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 402a5bc4 7be147dc
@@ -235,17 +235,6 @@ config I40E_DCB
 	  If unsure, say N.

-config I40E_FCOE
-	bool "Fibre Channel over Ethernet (FCoE)"
-	default n
-	depends on I40E && DCB && FCOE
-	---help---
-	  Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
-	  in the driver. This will create new netdev for exclusive FCoE
-	  use with XL710 FCoE offloads enabled.
-
-	  If unsure, say N.
-
 config I40EVF
 	tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
 	depends on PCI_MSI
...
@@ -45,4 +45,3 @@ i40e-objs := i40e_main.o \
 	     i40e_virtchnl_pf.o
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
@@ -56,9 +56,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
-#ifdef I40E_FCOE
-#include "i40e_fcoe.h"
-#endif
 #include "i40e_client.h"
 #include "i40e_virtchnl.h"
 #include "i40e_virtchnl_pf.h"

@@ -85,10 +82,6 @@
 	(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
 #define I40E_FDIR_RING			0
 #define I40E_FDIR_RING_COUNT		32
-#ifdef I40E_FCOE
-#define I40E_DEFAULT_FCOE		8 /* default number of QPs for FCoE */
-#define I40E_MINIMUM_FCOE		1 /* minimum number of QPs for FCoE */
-#endif /* I40E_FCOE */
 #define I40E_MAX_AQ_BUF_SIZE		4096
 #define I40E_AQ_LEN			256
 #define I40E_AQ_WORK_LIMIT		66 /* max number of VFs + a little */

@@ -347,10 +340,6 @@ struct i40e_pf {
 	u16 num_vmdq_msix;	/* num queue vectors per vmdq pool */
 	u16 num_req_vfs;	/* num VFs requested for this VF */
 	u16 num_vf_qps;		/* num queue pairs per VF */
-#ifdef I40E_FCOE
-	u16 num_fcoe_qps;	/* num fcoe queues this PF has set up */
-	u16 num_fcoe_msix;	/* num queue vectors per fcoe pool */
-#endif /* I40E_FCOE */
 	u16 num_lan_qps;	/* num lan queues this PF has set up */
 	u16 num_lan_msix;	/* num queue vectors for the base PF vsi */
 	u16 num_fdsb_msix;	/* num queue vectors for sideband Fdir */

@@ -411,9 +400,6 @@ struct i40e_pf {
 #define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
 #define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
 #define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
-#ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
-#endif /* I40E_FCOE */
 #define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
 #define I40E_FLAG_SERVICE_CLIENT_REQUESTED	BIT_ULL(16)

@@ -461,10 +447,6 @@ struct i40e_pf {
 	 */
 	u64 hw_disabled_flags;

-#ifdef I40E_FCOE
-	struct i40e_fcoe fcoe;
-
-#endif /* I40E_FCOE */
 	struct i40e_client_instance *cinst;
 	bool stat_offsets_loaded;
 	struct i40e_hw_port_stats stats;

@@ -520,8 +502,6 @@ struct i40e_pf {
 	 */
 	u16 dcbx_cap;

-	u32 fcoe_hmc_filt_num;
-	u32 fcoe_hmc_cntx_num;
 	struct i40e_filter_control_settings filter_settings;

 	struct ptp_clock *ptp_clock;

@@ -641,11 +621,6 @@ struct i40e_vsi {
 	struct rtnl_link_stats64 net_stats_offsets;
 	struct i40e_eth_stats eth_stats;
 	struct i40e_eth_stats eth_stats_offsets;
-#ifdef I40E_FCOE
-	struct i40e_fcoe_stats fcoe_stats;
-	struct i40e_fcoe_stats fcoe_stats_offsets;
-	bool fcoe_stat_offsets_loaded;
-#endif
 	u32 tx_restart;
 	u32 tx_busy;
 	u64 tx_linearize;

@@ -918,11 +893,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
-			      struct i40e_vsi_context *ctxt,
-			      u8 enabled_tc, bool is_add);
-#endif
 void i40e_service_event_schedule(struct i40e_pf *pf);
 void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
 				  u8 *msg, u16 len);

@@ -982,20 +952,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);

-#ifdef I40E_FCOE
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
-				  struct rtnl_link_stats64 *storage);
-int i40e_set_mac(struct net_device *netdev, void *p);
-void i40e_set_rx_mode(struct net_device *netdev);
-#endif
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev);
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
-			 __always_unused __be16 proto, u16 vid);
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
-			  __always_unused __be16 proto, u16 vid);
-#endif
 int i40e_open(struct net_device *netdev);
 int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);

@@ -1009,25 +966,6 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
-		    struct tc_to_netdev *tc);
-void i40e_netpoll(struct net_device *netdev);
-int i40e_fcoe_enable(struct net_device *netdev);
-int i40e_fcoe_disable(struct net_device *netdev);
-int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
-u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
-void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
-void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-void i40e_init_pf_fcoe(struct i40e_pf *pf);
-int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
-void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
-int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
-			     union i40e_rx_desc *rx_desc,
-			     struct sk_buff *skb);
-void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
-			     union i40e_rx_desc *rx_desc, u8 prog_id);
-#endif /* I40E_FCOE */
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 #ifdef CONFIG_I40E_DCB
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
...
@@ -147,6 +147,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
 		dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
 		return;
 	}
+	memset(&params, 0, sizeof(params));
+	i40e_client_get_params(vsi, &params);
 	memcpy(&cdev->lan_info.params, &params, sizeof(struct i40e_params));
 	cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client,
 					   &params);
...
@@ -1088,33 +1088,6 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
 }

-#ifdef I40E_FCOE
-/**
- * i40e_get_san_mac_addr - get SAN MAC address
- * @hw: pointer to the HW structure
- * @mac_addr: pointer to SAN MAC address
- *
- * Reads the adapter's SAN MAC address from NVM
- **/
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
-{
-	struct i40e_aqc_mac_address_read_data addrs;
-	i40e_status status;
-	u16 flags = 0;
-
-	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
-	if (status)
-		return status;
-
-	if (flags & I40E_AQC_SAN_ADDR_VALID)
-		ether_addr_copy(mac_addr, addrs.pf_san_mac);
-	else
-		status = I40E_ERR_INVALID_MAC_ADDR;
-
-	return status;
-}
-#endif
-
 /**
  * i40e_read_pba_string - Reads part number string from EEPROM
...
@@ -484,25 +484,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 			 vsi->bw_ets_limit_credits[i],
 			 vsi->bw_ets_max_quanta[i]);
 	}
-#ifdef I40E_FCOE
-	if (vsi->type == I40E_VSI_FCOE) {
-		dev_info(&pf->pdev->dev,
-			 "    fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
-			 vsi->fcoe_stats.rx_fcoe_packets,
-			 vsi->fcoe_stats.rx_fcoe_dwords,
-			 vsi->fcoe_stats.rx_fcoe_dropped);
-		dev_info(&pf->pdev->dev,
-			 "    fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
-			 vsi->fcoe_stats.tx_fcoe_packets,
-			 vsi->fcoe_stats.tx_fcoe_dwords);
-		dev_info(&pf->pdev->dev,
-			 "    fcoe_stats: bad_crc = %llu, last_error = %llu\n",
-			 vsi->fcoe_stats.fcoe_bad_fccrc,
-			 vsi->fcoe_stats.fcoe_last_error);
-		dev_info(&pf->pdev->dev, "    fcoe_stats: ddp_count = %llu\n",
-			 vsi->fcoe_stats.fcoe_ddp_count);
-	}
-#endif
 }

 /**
...
@@ -162,19 +162,6 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
 };

-#ifdef I40E_FCOE
-static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
-	I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
-	I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
-	I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
-	I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
-	I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
-	I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
-	I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
-	I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
-};
-#endif /* I40E_FCOE */
-
 #define I40E_QUEUE_STATS_LEN(n) \
 	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
 	    * 2 /* Tx and Rx together */ \

@@ -182,17 +169,9 @@ static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
 #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
 #define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
-#ifdef I40E_FCOE
-#define I40E_FCOE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_fcoe_stats)
-#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
-				 I40E_FCOE_STATS_LEN + \
-				 I40E_MISC_STATS_LEN + \
-				 I40E_QUEUE_STATS_LEN((n)))
-#else
 #define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
 				 I40E_MISC_STATS_LEN + \
 				 I40E_QUEUE_STATS_LEN((n)))
-#endif /* I40E_FCOE */
 #define I40E_PFC_STATS_LEN ( \
 		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \

@@ -1530,13 +1509,6 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
 			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
-#ifdef I40E_FCOE
-	for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
-		p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
-		data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
-			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
-	}
-#endif
 	rcu_read_lock();
 	for (j = 0; j < vsi->num_queue_pairs; j++) {
 		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);

@@ -1624,13 +1596,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 				 i40e_gstrings_misc_stats[i].stat_string);
 			p += ETH_GSTRING_LEN;
 		}
-#ifdef I40E_FCOE
-		for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
-			snprintf(p, ETH_GSTRING_LEN, "%s",
-				 i40e_gstrings_fcoe_stats[i].stat_string);
-			p += ETH_GSTRING_LEN;
-		}
-#endif
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
 			p += ETH_GSTRING_LEN;
...
@@ -78,7 +78,4 @@ do {								\
 } while (0)

 typedef enum i40e_status_code i40e_status;
-#ifdef CONFIG_I40E_FCOE
-#define I40E_FCOE
-#endif
 #endif /* _I40E_OSDEP_H_ */
@@ -304,9 +304,6 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
 				 u32 pba_num_size);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
-#ifdef I40E_FCOE
-i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-#endif
 /* prototype for functions used for NVM access */
 i40e_status i40e_init_nvm(struct i40e_hw *hw);
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
...
@@ -1062,11 +1062,6 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,

 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
 		i40e_fd_handle_status(rx_ring, rx_desc, id);
-#ifdef I40E_FCOE
-	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
-		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
-		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
-#endif
 }

 /**

@@ -1154,7 +1149,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 				     PAGE_SIZE,
 				     DMA_FROM_DEVICE,
 				     I40E_RX_DMA_ATTR);
-		__free_pages(rx_bi->page, 0);
+		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

 		rx_bi->page = NULL;
 		rx_bi->page_offset = 0;

@@ -1299,6 +1294,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;

 	return true;
 }

@@ -1391,8 +1387,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
  * @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    struct sk_buff *skb,

@@ -1554,12 +1548,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,

 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

 	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }

 /**

@@ -1604,7 +1598,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

 	/* transfer page from old buffer to new buffer */
-	*new_buff = *old_buff;
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }

@@ -1656,6 +1653,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 #if (PAGE_SIZE >= 8192)
 	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;

 	/* Is any reuse possible? */
 	if (unlikely(!i40e_page_is_reusable(page)))

@@ -1663,7 +1661,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,

 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_count(page) != pagecnt_bias))
 		return false;

 	/* flip page offset to other buffer */

@@ -1676,9 +1674,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 		return false;
 #endif

-	/* Inc ref count on page before passing it up to the stack */
-	get_page(page);
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}

 	return true;
 }

@@ -1725,7 +1728,6 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
 		return true;

 	/* this page cannot be reused so discard it */
-	__free_pages(page, 0);
 	return false;
 }

@@ -1819,6 +1821,8 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
 	}

 	/* clear contents of buffer_info */

@@ -1930,6 +1934,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		 */
 		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
 			dev_kfree_skb_any(skb);
+			skb = NULL;
 			continue;
 		}

@@ -1948,15 +1953,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		/* populate checksum, VLAN, and protocol */
 		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

-#ifdef I40E_FCOE
-		if (unlikely(
-		    i40e_rx_is_fcoe(rx_ptype) &&
-		    !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
-			dev_kfree_skb_any(skb);
-			continue;
-		}
-#endif
 		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
 			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;

@@ -2332,15 +2328,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * Returns error code indicate the frame should be dropped upon error and the
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
-#ifdef I40E_FCOE
-inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring,
-				      u32 *flags)
-#else
 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 					     struct i40e_ring *tx_ring,
 					     u32 *flags)
-#endif
 {
 	__be16 protocol = skb->protocol;
 	u32 tx_flags = 0;

@@ -2848,15 +2838,9 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-#ifdef I40E_FCOE
-inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			struct i40e_tx_buffer *first, u32 tx_flags,
-			const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			       struct i40e_tx_buffer *first, u32 tx_flags,
 			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#endif
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
...
@@ -258,7 +258,12 @@ struct i40e_tx_buffer {
 struct i40e_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };

 struct i40e_queue_stats {

@@ -396,13 +401,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
-#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		 struct i40e_tx_buffer *first, u32 tx_flags,
-		 const u8 hdr_len, u32 td_cmd, u32 td_offset);
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-			       struct i40e_ring *tx_ring, u32 *flags);
-#endif
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);

@@ -485,16 +483,6 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 	return count != I40E_MAX_BUFFER_TXD;
 }

-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
 /**
  * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
  * @ring: Tx ring to find the netdev equivalent of
...
@@ -1213,25 +1213,6 @@ struct i40e_veb_tc_stats {
 	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
 };

-#ifdef I40E_FCOE
-/* Statistics collected per function for FCoE */
-struct i40e_fcoe_stats {
-	u64 rx_fcoe_packets;		/* fcoeprc */
-	u64 rx_fcoe_dwords;		/* focedwrc */
-	u64 rx_fcoe_dropped;		/* fcoerpdc */
-	u64 tx_fcoe_packets;		/* fcoeptc */
-	u64 tx_fcoe_dwords;		/* focedwtc */
-	u64 fcoe_bad_fccrc;		/* fcoecrc */
-	u64 fcoe_last_error;		/* fcoelast */
-	u64 fcoe_ddp_count;		/* fcoeddpc */
-};
-
-/* offset to per function FCoE statistics block */
-#define I40E_FCOE_VF_STAT_OFFSET	0
-#define I40E_FCOE_PF_STAT_OFFSET	128
-#define I40E_FCOE_STAT_MAX		(I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
-#endif
-
 /* Statistics collected by the MAC */
 struct i40e_hw_port_stats {
 	/* eth stats collected by the port */

@@ -1319,125 +1300,6 @@ struct i40e_hw_port_stats {
 #define I40E_SRRD_SRCTL_ATTEMPTS	100000

-#ifdef I40E_FCOE
-/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
-
-enum i40E_fcoe_tx_ctx_desc_cmd_bits {
-	I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND	= 0x00, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2	= 0x01, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3	= 0x05, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2	= 0x02, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3	= 0x06, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2	= 0x03, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3	= 0x07, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL	= 0x08, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL	= 0x09, /* 4 BITS */
-	I40E_FCOE_TX_CTX_DESC_RELOFF			= 0x10,
-	I40E_FCOE_TX_CTX_DESC_CLRSEQ			= 0x20,
-	I40E_FCOE_TX_CTX_DESC_DIFENA			= 0x40,
-	I40E_FCOE_TX_CTX_DESC_IL2TAG2			= 0x80
-};
-
-/* FCoE DDP Context descriptor */
-struct i40e_fcoe_ddp_context_desc {
-	__le64 rsvd;
-	__le64 type_cmd_foff_lsize;
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT	0
-#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK	(0xFULL << \
-					I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT	4
-#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK	(0xFULL << \
-					 I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
-
-enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
-	I40E_FCOE_DDP_CTX_DESC_BSIZE_512B	= 0x00, /* 2 BITS */
-	I40E_FCOE_DDP_CTX_DESC_BSIZE_4K		= 0x01, /* 2 BITS */
-	I40E_FCOE_DDP_CTX_DESC_BSIZE_8K		= 0x02, /* 2 BITS */
-	I40E_FCOE_DDP_CTX_DESC_BSIZE_16K	= 0x03, /* 2 BITS */
-	I40E_FCOE_DDP_CTX_DESC_DIFENA		= 0x04, /* 1 BIT  */
-	I40E_FCOE_DDP_CTX_DESC_LASTSEQH		= 0x08, /* 1 BIT  */
-};
-
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT	16
-#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK	(0x3FFFULL << \
-					 I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
-
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT	32
-#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK	(0x3FFFULL << \
-					I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
-
-/* FCoE DDP/DWO Queue Context descriptor */
-struct i40e_fcoe_queue_context_desc {
-	__le64 dmaindx_fbase;		/* 0:11 DMAINDX, 12:63 FBASE */
-	__le64 flen_tph;		/* 0:12 FLEN, 13:15 TPH */
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT	0
-#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK	(0xFFFULL << \
-					I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT	12
-#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK	(0xFFFFFFFFFFFFFULL << \
-					I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT	0
-#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK	(0x1FFFULL << \
-					I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT	13
-#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK	(0x7ULL << \
-					I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
-
-enum i40e_fcoe_queue_ctx_desc_tph_bits {
-	I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC	= 0x1,
-	I40E_FCOE_QUEUE_CTX_DESC_TPHDATA	= 0x2
-};
-
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT	30
-#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK	(0x3ULL << \
-					I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
-
-/* FCoE DDP/DWO Filter Context descriptor */
-struct i40e_fcoe_filter_context_desc {
-	__le32 param;
-	__le16 seqn;
-
-	/* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
-	__le16 rsvd_dmaindx;
-
-	/* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
-	__le64 flags_rsvd_lanq;
-};
-
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT	4
-#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK	(0xFFF << \
-					I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
-
-enum i40e_fcoe_filter_ctx_desc_flags_bits {
-	I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP	= 0x00,
-	I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO	= 0x01,
-	I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT	= 0x00,
-	I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP	= 0x02,
-	I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2	= 0x00,
-	I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3	= 0x04
-};
-
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT	0
-#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK	(0xFFULL << \
-					I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT	8
-#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK	(0x3FULL << \
-					I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
-
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT	53
-#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK	(0x7FFULL << \
-					I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
-
-#endif /* I40E_FCOE */
-
 enum i40e_switch_element_types {
 	I40E_SWITCH_ELEMENT_TYPE_MAC	= 1,
 	I40E_SWITCH_ELEMENT_TYPE_PF	= 2,
...
@@ -809,6 +809,11 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
 	u32 reg_idx, reg;
 	int i, msix_vf;

+	/* Start by disabling VF's configuration API to prevent the OS from
+	 * accessing the VF's VSI after it's freed / invalidated.
+	 */
+	clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
 	/* free vsi & disconnect it from the parent uplink */
 	if (vf->lan_vsi_idx) {
 		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);

@@ -848,7 +853,6 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
 	/* reset some of the state variables keeping track of the resources */
 	vf->num_queue_pairs = 0;
 	vf->vf_states = 0;
-	clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 }

 /**

@@ -939,6 +943,14 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 	/* warn the VF */
 	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);

+	/* Disable VF's configuration API during reset. The flag is re-enabled
+	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
+	 * It's normally disabled in i40e_free_vf_res(), but it's safer
+	 * to do it earlier to give some time to finish to any VF config
+	 * functions that may still be running at this point.
+	 */
+	clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
 	/* In the case of a VFLR, the HW has already reset the VF and we
 	 * just need to clean up, so don't hit the VFRTRIG register.
 	 */

@@ -982,11 +994,6 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 	if (!rsd)
 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 			vf->vf_id);
-	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
-	/* clear the reset bit in the VPGEN_VFRTRIG reg */
-	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

 	/* On initial reset, we won't have any queues */
 	if (vf->lan_vsi_idx == 0)

@@ -994,8 +1001,24 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

 complete_reset:
-	/* reallocate VF resources to reset the VSI state */
+	/* free VF resources to begin resetting the VSI state */
 	i40e_free_vf_res(vf);
+
+	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
+	 * By doing this we allow HW to access VF memory at any point. If we
+	 * did it any sooner, HW could access memory while it was being freed
+	 * in i40e_free_vf_res(), causing an IOMMU fault.
+	 *
+	 * On the other hand, this needs to be done ASAP, because the VF driver
+	 * is waiting for this to happen and may report a timeout. It's
+	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
+	 * it.
+	 */
+	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+	/* reallocate VF resources to finish resetting the VSI state */
 	if (!i40e_alloc_vf_res(vf)) {
 		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 		i40e_enable_vf_mappings(vf);

@@ -1006,7 +1029,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 		i40e_notify_client_of_vf_reset(pf, abs_vf_id);
 		vf->num_vlan = 0;
 	}
-	/* tell the VF the reset is done */
+
+	/* Tell the VF driver the reset is done. This needs to be done only
+	 * after VF has been fully initialized, because the VF driver may
+	 * request resources immediately after setting this flag.
+	 */
 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);

 	i40e_flush(hw);
...
@@ -87,7 +87,6 @@ struct i40e_vf {
 	u16 stag;

 	struct i40e_virtchnl_ether_addr default_lan_addr;
-	struct i40e_virtchnl_ether_addr default_fcoe_addr;
 	u16 port_vlan_id;
 	bool pf_set_mac;	/* The VMM admin set the VF MAC address */
 	bool trusted;
...
@@ -526,7 +526,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
 				     PAGE_SIZE,
 				     DMA_FROM_DEVICE,
 				     I40E_RX_DMA_ATTR);
-		__free_pages(rx_bi->page, 0);
+		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

 		rx_bi->page = NULL;
 		rx_bi->page_offset = 0;

@@ -671,6 +671,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;

 	return true;
 }

@@ -763,8 +764,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
  * @vsi: the VSI we care about
  * @skb: skb currently being received and modified
  * @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
  **/
 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 				    struct sk_buff *skb,

@@ -916,12 +915,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
 {
 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

-	/* modifies the skb - consumes the enet header */
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

 	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	/* modifies the skb - consumes the enet header */
+	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }

 /**

@@ -966,7 +965,10 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

 	/* transfer page from old buffer to new buffer */
-	*new_buff = *old_buff;
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }

@@ -1018,6 +1020,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 #if (PAGE_SIZE >= 8192)
 	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;

 	/* Is any reuse possible? */
 	if (unlikely(!i40e_page_is_reusable(page)))

@@ -1025,7 +1028,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,

 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_count(page) != pagecnt_bias))
 		return false;

 	/* flip page offset to other buffer */

@@ -1038,8 +1041,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 		return false;
 #endif

-	/* Inc ref count on page before passing it up to the stack */
-	get_page(page);
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}

 	return true;
 }

@@ -1087,7 +1096,6 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
 		return true;

 	/* this page cannot be reused so discard it */
-	__free_pages(page, 0);
 	return false;
 }

@@ -1181,6 +1189,8 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
 	}

 	/* clear contents of buffer_info */

@@ -1287,6 +1297,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		 */
 		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
 			dev_kfree_skb_any(skb);
+			skb = NULL;
 			continue;
 		}
...
@@ -244,7 +244,12 @@ struct i40e_tx_buffer {
 struct i40e_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };

 struct i40e_queue_stats {

@@ -463,19 +468,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 	/* we can support up to 8 data buffers for a single send */
 	return count != I40E_MAX_BUFFER_TXD;
 }
-
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
 /**
- * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
  * @ring: Tx ring to find the netdev equivalent of
  **/
 static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
...
@@ -34,12 +34,12 @@ static struct i40e_ops i40evf_lan_ops = {
  **/
 void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
 {
-	struct i40evf_adapter *adapter = vsi->back;
-	struct i40e_client_instance *cinst = adapter->cinst;
+	struct i40e_client_instance *cinst;

 	if (!vsi)
 		return;

+	cinst = vsi->back->cinst;
 	if (!cinst || !cinst->client || !cinst->client->ops ||
 	    !cinst->client->ops->virtchnl_receive) {
 		dev_dbg(&vsi->back->pdev->dev,

@@ -58,12 +58,13 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
  **/
 void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
 {
-	struct i40evf_adapter *adapter = vsi->back;
-	struct i40e_client_instance *cinst = adapter->cinst;
+	struct i40e_client_instance *cinst;
 	struct i40e_params params;

 	if (!vsi)
 		return;

+	cinst = vsi->back->cinst;
 	memset(&params, 0, sizeof(params));
 	params.mtu = vsi->netdev->mtu;
 	params.link_up = vsi->back->link_up;
...