Commit d39a9ffc authored by David S. Miller

Merge branch 'intel-next'

Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates to the i40e and i40evf drivers.

Vasu adds FCOE support, build options and a documentation pointer to i40e.

Shannon exposes a Firmware API request used to do register writes on the
driver's behalf and disables local loopback on VMDQ VSI in order to stop the
VEB from echoing the VMDQ packets back at the VSI.

Ashish corrects the vf_id offset for virtchnl messages in the case of multiple
PFs, removes support for vf unicast promiscuous mode to disallow VFs from
receiving traffic intended for another VF, updates the vfr_stat state check to
handle the existing and future mechanism and adds an adapter state check to
prevent re-arming the watchdog timer after i40evf_remove has been called and
the timer has been deleted.

Serey fixes an issue where a guest OS would panic when removing the vf driver
while the device is being reset due to an attempt to clean a non initialized
mac_filter_list.

Akeem makes a minor comment change.

Jessie changes an instance of sprintf to snprintf that was missed when the
driver was converted to use snprintf everywhere.

Mitch plugs a few memory leaks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a4f090fd 8bb1a540
......@@ -69,8 +69,11 @@ Additional Configurations
FCoE
----
Fiber Channel over Ethernet (FCoE) hardware offload is not currently
supported.
The driver supports Fiber Channel over Ethernet (FCoE) and Data Center
Bridging (DCB) functionality. Configuring DCB and FCoE is outside the scope
of this driver doc. Refer to http://www.open-fcoe.org/ for FCoE project
information and http://www.open-lldp.org/ or email list
e1000-eedc@lists.sourceforge.net for DCB information.
MAC and VLAN anti-spoofing feature
----------------------------------
......
......@@ -44,3 +44,4 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
......@@ -54,6 +54,9 @@
#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#ifdef I40E_FCOE
#include "i40e_fcoe.h"
#endif
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
......@@ -79,6 +82,10 @@
#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#ifdef I40E_FCOE
#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */
#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */
#endif /* I40E_FCOE */
#define I40E_MAX_AQ_BUF_SIZE 4096
#define I40E_AQ_LEN 32
#define I40E_AQ_WORK_LIMIT 16
......@@ -225,6 +232,10 @@ struct i40e_pf {
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
u16 num_req_vfs; /* num vfs requested for this vf */
u16 num_vf_qps; /* num queue pairs per vf */
#ifdef I40E_FCOE
u16 num_fcoe_qps; /* num fcoe queues this pf has set up */
u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
int queues_left; /* queues left unclaimed */
......@@ -265,6 +276,9 @@ struct i40e_pf {
#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11)
#endif /* I40E_FCOE */
#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
......@@ -286,6 +300,10 @@ struct i40e_pf {
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
#ifdef I40E_FCOE
struct i40e_fcoe fcoe;
#endif /* I40E_FCOE */
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
......@@ -408,6 +426,11 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
#ifdef I40E_FCOE
struct i40e_fcoe_stats fcoe_stats;
struct i40e_fcoe_stats fcoe_stats_offsets;
bool fcoe_stat_offsets_loaded;
#endif
u32 tx_restart;
u32 tx_busy;
u32 rx_buf_failed;
......@@ -598,6 +621,11 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
......@@ -624,7 +652,21 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
struct rtnl_link_stats64 *storage);
int i40e_set_mac(struct net_device *netdev, void *p);
void i40e_set_rx_mode(struct net_device *netdev);
#endif
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev);
int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
......@@ -634,6 +676,26 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_setup_tc(struct net_device *netdev, u8 tc);
void i40e_netpoll(struct net_device *netdev);
int i40e_fcoe_enable(struct net_device *netdev);
int i40e_fcoe_disable(struct net_device *netdev);
int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
int i40e_init_pf_fcoe(struct i40e_pf *pf);
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
struct sk_buff *skb);
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, u8 prog_id);
#endif /* I40E_FCOE */
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
......
......@@ -709,6 +709,33 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}
#ifdef I40E_FCOE
/**
 * i40e_get_san_mac_addr - get SAN MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to SAN MAC address
 *
 * Reads the adapter's SAN MAC address from NVM via the MAC address read
 * admin queue command; copies it to @mac_addr only when the firmware
 * flags it as valid.
 *
 * Returns the admin queue status, or I40E_ERR_INVALID_MAC_ADDR when the
 * NVM does not hold a valid SAN MAC address.
 **/
i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	/* only the firmware knows whether the SAN address is programmed */
	if (flags & I40E_AQC_SAN_ADDR_VALID)
		memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}
#endif
/**
* i40e_get_media_type - Gets media type
......@@ -1974,6 +2001,35 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
return status;
}
/**
 * i40e_aq_debug_write_register - write a register on the driver's behalf
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: 64-bit register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Write to a register using the admin queue commands.
 *
 * Returns the admin queue send status.
 **/
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
					 u32 reg_addr, u64 reg_val,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_debug_reg_read_write *cmd =
		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);

	/* the 64-bit value is split across two little-endian 32-bit fields */
	cmd->address = cpu_to_le32(reg_addr);
	cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
	cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
/**
* i40e_aq_set_hmc_resource_profile
* @hw: pointer to the hw struct
......
......@@ -697,6 +697,25 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->bw_ets_limit_credits[i],
vsi->bw_ets_max_quanta[i]);
}
#ifdef I40E_FCOE
if (vsi->type == I40E_VSI_FCOE) {
dev_info(&pf->pdev->dev,
" fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
vsi->fcoe_stats.rx_fcoe_packets,
vsi->fcoe_stats.rx_fcoe_dwords,
vsi->fcoe_stats.rx_fcoe_dropped);
dev_info(&pf->pdev->dev,
" fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
vsi->fcoe_stats.tx_fcoe_packets,
vsi->fcoe_stats.tx_fcoe_dwords);
dev_info(&pf->pdev->dev,
" fcoe_stats: bad_crc = %llu, last_error = %llu\n",
vsi->fcoe_stats.fcoe_bad_fccrc,
vsi->fcoe_stats.fcoe_last_error);
dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n",
vsi->fcoe_stats.fcoe_ddp_count);
}
#endif
}
/**
......
......@@ -155,6 +155,19 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
#ifdef I40E_FCOE
static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
};
#endif /* I40E_FCOE */
#define I40E_QUEUE_STATS_LEN(n) \
(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
* 2 /* Tx and Rx together */ \
......@@ -162,9 +175,17 @@ static struct i40e_stats i40e_gstrings_stats[] = {
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
#ifdef I40E_FCOE
#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats)
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_FCOE_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#else
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#endif /* I40E_FCOE */
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
......@@ -1112,6 +1133,13 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
#ifdef I40E_FCOE
for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
#endif
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
......@@ -1193,6 +1221,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_misc_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
#ifdef I40E_FCOE
for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
snprintf(p, ETH_GSTRING_LEN, "%s",
i40e_gstrings_fcoe_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
#endif
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
......
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#include <linux/if_ether.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
#include "i40e.h"
#include "i40e_fcoe.h"
/**
 * i40e_rx_is_fip - returns true if the rx packet type is FIP
 * @ptype: the packet type field from rx descriptor write-back
 **/
static inline bool i40e_rx_is_fip(u16 ptype)
{
	bool is_fip = (ptype == I40E_RX_PTYPE_L2_FIP_PAY2);

	return is_fip;
}
/**
 * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
 * @ptype: the packet type field from rx descriptor write-back
 **/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
	if (ptype < I40E_RX_PTYPE_L2_FCOE_PAY3)
		return false;

	return ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER;
}
/**
 * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
 * @sof: the FCoE start of frame delimiter
 **/
static inline bool i40e_fcoe_sof_is_class2(u8 sof)
{
	switch (sof) {
	case FC_SOF_I2:
	case FC_SOF_N2:
		return true;
	default:
		return false;
	}
}
/**
 * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF
 * @sof: the FCoE start of frame delimiter
 **/
static inline bool i40e_fcoe_sof_is_class3(u8 sof)
{
	switch (sof) {
	case FC_SOF_I3:
	case FC_SOF_N3:
		return true;
	default:
		return false;
	}
}
/**
 * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW
 * @sof: the input SOF value from the frame
 *
 * Only Class 2 and Class 3 start-of-frame delimiters are handled by HW.
 **/
static inline bool i40e_fcoe_sof_is_supported(u8 sof)
{
	if (i40e_fcoe_sof_is_class2(sof))
		return true;

	return i40e_fcoe_sof_is_class3(sof);
}
/**
 * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame
 * @skb: the frame whose SOF is to be pulled from
 * @sof: output; receives the start-of-frame delimiter byte
 *
 * Returns 0 when the SOF is one the hardware supports, -EINVAL otherwise.
 **/
static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof)
{
	struct fcoe_hdr *hdr = (struct fcoe_hdr *)skb_network_header(skb);

	*sof = hdr->fcoe_sof;

	return i40e_fcoe_sof_is_supported(*sof) ? 0 : -EINVAL;
}
/**
 * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW
 * @eof: the input EOF value from the frame
 **/
static inline bool i40e_fcoe_eof_is_supported(u8 eof)
{
	switch (eof) {
	case FC_EOF_N:
	case FC_EOF_T:
	case FC_EOF_NI:
	case FC_EOF_A:
		return true;
	default:
		return false;
	}
}
/**
 * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame
 * @skb: the frame whose EOF is to be pulled from
 * @eof: output; receives the end-of-frame delimiter byte
 *
 * Returns 0 when the EOF is one the hardware supports, -EINVAL otherwise.
 **/
static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
{
	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, eof, 1);

	return i40e_fcoe_eof_is_supported(*eof) ? 0 : -EINVAL;
}
/**
 * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming
 * @eof: the input eof value from the frame
 *
 * The FC EOF is converted to the value understood by HW for descriptor
 * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
 * first.
 **/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
	if (eof == FC_EOF_N)
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N;
	if (eof == FC_EOF_T)
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T;
	if (eof == FC_EOF_NI)
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI;
	if (eof == FC_EOF_A)
		return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;

	/* FIXME: still returns 0 */
	pr_err("Unrecognized EOF %x\n", eof);
	return 0;
}
/**
 * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid
 * @xid: the exchange id
 *
 * Valid means both "known" and within the DDP context table bounds.
 **/
static inline bool i40e_fcoe_xid_is_valid(u16 xid)
{
	if (xid == FC_XID_UNKNOWN)
		return false;

	return xid < I40E_FCOE_DDP_MAX;
}
/**
 * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated
 * @pf: pointer to pf
 * @ddp: sw DDP context
 *
 * Unmap the scatter-gather list associated with the given SW DDP context
 * and release its dma_pool buffer, if any. Safe to call repeatedly: the
 * __I40E_FCOE_DDP_UNMAPPED flag makes every call after the first a no-op.
 **/
static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf,
struct i40e_fcoe_ddp *ddp)
{
/* atomically claim the unmap; bail if somebody already did it */
if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags))
return;
if (ddp->sgl) {
dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
ddp->sgl = NULL;
ddp->sgc = 0;
}
if (ddp->pool) {
/* return the user descriptor list buffer to its dma_pool */
dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ddp->pool = NULL;
}
}
/**
 * i40e_fcoe_ddp_clear - clear the given SW DDP context
 * @ddp: SW DDP context
 *
 * Resets the context to its idle state: xid unknown, no flags set.
 **/
static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp)
{
	memset(ddp, 0, sizeof(*ddp));
	ddp->xid = FC_XID_UNKNOWN;
	ddp->flags = __I40E_FCOE_DDP_NONE;
}
/**
 * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE
 * @id: the prog id for the programming status Rx descriptor write-back
 **/
static inline bool i40e_fcoe_progid_is_fcoe(u8 id)
{
	switch (id) {
	case I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS:
	case I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS:
		return true;
	default:
		return false;
	}
}
/**
 * i40e_fcoe_fc_get_xid - get xid from the frame header
 * @fh: the fc frame header
 *
 * When the exchange context bit is set in F_CTL the local end is the
 * exchange originator, so the originator exchange id (ox_id) identifies
 * it; otherwise the responder exchange id (rx_id) is used.
 *
 * Returns ox_id if exchange originator, rx_id if responder
 **/
static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh)
{
	u32 f_ctl = ntoh24(fh->fh_f_ctl);

	if (f_ctl & FC_FC_EX_CTX)
		return be16_to_cpu(fh->fh_ox_id);

	return be16_to_cpu(fh->fh_rx_id);
}
/**
* i40e_fcoe_fc_frame_header - get fc frame header from skb
* @skb: packet
*
* This checks if there is a VLAN header and returns the data
* pointer to the start of the fc_frame_header.
*
* Returns pointer to the fc_frame_header
**/
static inline struct fc_frame_header *i40e_fcoe_fc_frame_header(
struct sk_buff *skb)
{
void *fh = skb->data + sizeof(struct fcoe_hdr);
if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
fh += sizeof(struct vlan_hdr);
return (struct fc_frame_header *)fh;
}
/**
 * i40e_fcoe_ddp_put - release the DDP context for a given exchange id
 * @netdev: the corresponding net_device
 * @xid: the exchange id that corresponding DDP context will be released
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 **/
static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct i40e_fcoe_ddp *ddp;
	int len = 0;

	/* xid comes from the upper-layer driver; validate it before
	 * indexing the DDP context table to avoid an out-of-bounds
	 * access. (The previous NULL checks on &pf->fcoe and the table
	 * entry were dead code: addresses of embedded members are never
	 * NULL.)
	 */
	if (xid >= I40E_FCOE_DDP_MAX)
		goto out;

	ddp = &fcoe->ddp[xid];

	/* only a completed DDP has a meaningful length to report */
	if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags))
		len = ddp->len;

	i40e_fcoe_ddp_unmap(pf, ddp);
out:
	return len;
}
/**
 * i40e_init_pf_fcoe - sets up the HW for FCoE
 * @pf: pointer to pf
 *
 * Checks the function capabilities and, when both FCoE and DCB are
 * available, enables the FCoE receive hash filters, reserves HMC
 * context/filter space and programs the maximum FCoE frame size.
 *
 * Returns 0 if FCoE is supported otherwise the error code
 **/
int i40e_init_pf_fcoe(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
/* start from a clean, disabled state; re-enabled below if supported */
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
pf->num_fcoe_qps = 0;
pf->fcoe_hmc_cntx_num = 0;
pf->fcoe_hmc_filt_num = 0;
if (!pf->hw.func_caps.fcoe) {
dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
return 0;
}
/* FCoE is only enabled together with DCB on this hardware */
if (!pf->hw.func_caps.dcb) {
dev_warn(&pf->pdev->dev,
"Hardware is not DCB capable not enabling FCoE.\n");
return 0;
}
/* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1));
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val);
/* enable flag */
pf->flags |= I40E_FLAG_FCOE_ENABLED;
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
/* Reserve 4K DDP contexts and 20K filter size for FCoE */
pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
I40E_DMA_CNTX_BASE_SIZE;
pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
(1 << I40E_HASH_FILTER_SIZE_16K) *
I40E_HASH_FILTER_BASE_SIZE;
/* FCoE object: max 16K filter buckets and 4K DMA contexts */
pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K;
pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
/* Setup max frame with FCoE_MTU plus L2 overheads */
val = rd32(hw, I40E_GLFCOE_RCTL);
val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
<< I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
wr32(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
return 0;
}
/**
 * i40e_get_fcoe_tc_map - Return TC map for FCoE APP
 * @pf: pointer to pf
 *
 * Scans the locally stored DCBX APP table for the FCoE ethertype entry
 * and converts its user priority into a traffic class bitmap via the
 * ETS priority table.
 *
 * Returns the enabled TC bitmap, or 0x1 (TC0) when no FCoE APP TLV
 * is configured.
 **/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
{
struct i40e_ieee_app_priority_table app;
struct i40e_hw *hw = &pf->hw;
u8 enabled_tc = 0;
u8 tc, i;
/* Get the FCoE APP TLV */
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
for (i = 0; i < dcbcfg->numapps; i++) {
app = dcbcfg->app[i];
if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app.protocolid == ETH_P_FCOE) {
/* priority -> TC through the ETS priority table */
tc = dcbcfg->etscfg.prioritytable[app.priority];
enabled_tc |= (1 << tc);
break;
}
}
/* TC0 if there is no TC defined for FCoE APP TLV */
enabled_tc = enabled_tc ? enabled_tc : 0x1;
return enabled_tc;
}
/**
 * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI
 * @vsi: pointer to the associated VSI struct
 * @ctxt: pointer to the associated VSI context to be passed to HW
 *
 * Fills in the switch and queueing sections of the VSI context so that
 * firmware creates a PF-owned VSI with only FCoE queueing enabled, with
 * its queues mapped onto the TC(s) taken from the DCBX FCoE APP TLV.
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
{
struct i40e_aqc_vsi_properties_data *info = &ctxt->info;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 enabled_tc = 0;
if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
dev_err(&pf->pdev->dev,
"FCoE is not enabled for this device\n");
return -EPERM;
}
/* initialize the hardware for FCoE */
ctxt->pf_num = hw->pf_id;
ctxt->vf_num = 0;
ctxt->uplink_seid = vsi->uplink_seid;
ctxt->connection_type = 0x1;
ctxt->flags = I40E_AQ_VSI_TYPE_PF;
/* FCoE VSI would need the following sections */
info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
/* FCoE VSI does not need these sections */
info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
I40E_AQ_VSI_PROP_VLAN_VALID |
I40E_AQ_VSI_PROP_CAS_PV_VALID |
I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
/* map the VSI queues onto the FCoE traffic class(es) */
enabled_tc = i40e_get_fcoe_tc_map(pf);
i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
/* set up queue option section: only enable FCoE */
info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA;
return 0;
}
/**
 * i40e_fcoe_enable - implementation of ndo_fcoe_enable
 * @netdev: pointer to the netdev that FCoE is created on
 *
 * Called (under RTNL) when the upper FCoE protocol stack is ready to
 * use the FCoE offload features; takes a reference on the FCoE object.
 *
 * Returns 0 on success, -ENODEV when the hardware has FCoE disabled,
 * -EBUSY when the netdev is not the FCoE VSI.
 **/
int i40e_fcoe_enable(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
		netdev_err(netdev, "HW does not support FCoE.\n");
		return -ENODEV;
	}

	if (np->vsi->type != I40E_VSI_FCOE) {
		netdev_err(netdev, "interface does not support FCoE.\n");
		return -EBUSY;
	}

	atomic_inc(&pf->fcoe.refcnt);

	return 0;
}
/**
 * i40e_fcoe_disable - disables FCoE for upper FCoE protocol stack.
 * @netdev: pointer to the netdev that FCoE is created on
 *
 * Drops the reference taken by i40e_fcoe_enable().
 *
 * Returns 0 on success, -ENODEV when the hardware has FCoE disabled,
 * -EBUSY for a non-FCoE VSI, -EINVAL when references remain.
 **/
int i40e_fcoe_disable(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
		netdev_err(netdev, "device does not support FCoE\n");
		return -ENODEV;
	}

	if (np->vsi->type != I40E_VSI_FCOE)
		return -EBUSY;

	if (!atomic_dec_and_test(&pf->fcoe.refcnt))
		return -EINVAL;

	netdev_info(netdev, "FCoE disabled\n");

	return 0;
}
/**
 * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
 * @fcoe: the FCoE sw object
 * @dev: the device that the pool is associated with
 * @cpu: the cpu for this pool
 **/
static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe,
				    struct device *dev,
				    unsigned int cpu)
{
	struct i40e_fcoe_ddp_pool *ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);

	if (ddp_pool->pool) {
		dma_pool_destroy(ddp_pool->pool);
		ddp_pool->pool = NULL;
	} else {
		dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu);
	}
}
/**
 * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
 * @fcoe: the FCoE sw object
 * @dev: the device that the pool is associated with
 * @cpu: the cpu for this pool
 *
 * Creates a named dma_pool for the given cpu's DDP buffers. Calling it
 * again for a cpu that already has a pool is a warned no-op.
 *
 * Returns 0 on successful or non zero on failure
 **/
static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct i40e_fcoe_ddp_pool *ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	struct dma_pool *pool;
	char pool_name[32];

	if (ddp_pool && ddp_pool->pool) {
		dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu);
		return 0;
	}

	snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu);
	pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX,
			       I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE);
	if (pool) {
		ddp_pool->pool = pool;
		return 0;
	}

	dev_err(dev, "dma_pool_create %s failed\n", pool_name);
	return -ENOMEM;
}
/**
 * i40e_fcoe_free_ddp_resources - release FCoE DDP resources
 * @vsi: the vsi FCoE is associated with
 *
 * Releases any outstanding DDP contexts, destroys the per-cpu DMA pools
 * and frees the percpu tracking memory. A no-op for non-FCoE VSIs and
 * when no pools were ever allocated.
 **/
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
int cpu, i;
/* do nothing if not FCoE VSI */
if (vsi->type != I40E_VSI_FCOE)
return;
/* do nothing if no DDP pools were allocated */
if (!fcoe->ddp_pool)
return;
/* release every DDP context still held by the FC stack */
for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
i40e_fcoe_ddp_put(vsi->netdev, i);
for_each_possible_cpu(cpu)
i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu);
free_percpu(fcoe->ddp_pool);
fcoe->ddp_pool = NULL;
netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n",
vsi->id, vsi->seid);
}
/**
 * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources
 * @vsi: the VSI FCoE is associated with
 *
 * Allocates the percpu DDP pool tracking structure, creates one DMA
 * pool per possible cpu and resets every software DDP context. On a
 * partial failure everything allocated so far is released.
 *
 * Returns 0 on successful or non zero on failure
 **/
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
struct device *dev = &pf->pdev->dev;
struct i40e_fcoe *fcoe = &pf->fcoe;
unsigned int cpu;
int i;
if (vsi->type != I40E_VSI_FCOE)
return -ENODEV;
/* do nothing if DDP pools were already allocated */
if (fcoe->ddp_pool)
return -EEXIST;
/* allocate per CPU memory to track DDP pools */
fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool);
if (!fcoe->ddp_pool) {
dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n");
return -ENOMEM;
}
/* allocate pci pool for each cpu */
for_each_possible_cpu(cpu) {
if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu))
continue;
/* creation failed: unwind everything done so far */
dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu);
i40e_fcoe_free_ddp_resources(vsi);
return -ENOMEM;
}
/* initialize the sw context */
for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
i40e_fcoe_ddp_clear(&fcoe->ddp[i]);
netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n",
vsi->id, vsi->seid);
return 0;
}
/**
 * i40e_fcoe_handle_status - check the Programming Status for FCoE
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor.
 * @prog_id: the prog id from the programming status descriptor write-back
 *
 * Check if this is the Rx Programming Status descriptor write-back for FCoE.
 * This is used to verify if the context/filter programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, u8 prog_id)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
struct i40e_fcoe_ddp *ddp;
u32 error;
u16 xid;
u64 qw;
/* we only care for FCoE here */
if (!i40e_fcoe_progid_is_fcoe(prog_id))
return;
/* mask the reported value into the DDP context table range */
xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
(I40E_FCOE_DDP_MAX - 1);
if (!i40e_fcoe_xid_is_valid(xid))
return;
ddp = &fcoe->ddp[xid];
WARN_ON(xid != ddp->xid);
qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
/* DDP context programming status: failure or success */
if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) {
if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) {
dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n",
xid, ddp->xid);
ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT;
}
if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) {
dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n",
xid, ddp->xid);
ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT;
}
}
/* DDP context invalidation status: failure or success */
if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) {
if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) {
dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n",
xid, ddp->xid);
ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT;
}
/* clear the flag so we can retry invalidation */
clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags);
}
/* unmap DMA */
i40e_fcoe_ddp_unmap(pf, ddp);
i40e_fcoe_ddp_clear(ddp);
}
/**
 * i40e_fcoe_handle_offload - check ddp status and mark it done
 * @rx_ring: the Rx ring this descriptor was received on
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCOE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates is the length of data
 * being ddped.
 *
 **/
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct i40e_fcoe *fcoe = &pf->fcoe;
struct fc_frame_header *fh = NULL;
struct i40e_fcoe_ddp *ddp = NULL;
u32 status, fltstat;
u32 error, fcerr;
int rc = -EINVAL;
u16 ptype;
u16 xid;
u64 qw;
/* check this rxd is for programming status */
qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* packet descriptor, check packet type */
ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
if (!i40e_rx_is_fcoe(ptype))
goto out_no_ddp;
error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) &
I40E_RX_DESC_FCOE_ERROR_MASK;
/* check stateless offload error */
if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) {
dev_err(&pf->pdev->dev, "Protocol Error\n");
skb->ip_summed = CHECKSUM_NONE;
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* check hw status on ddp */
status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
I40E_RX_DESC_FLTSTAT_FCMASK;
/* now we are ready to check DDP */
fh = i40e_fcoe_fc_frame_header(skb);
xid = i40e_fcoe_fc_get_xid(fh);
if (!i40e_fcoe_xid_is_valid(xid))
goto out_no_ddp;
/* non DDP normal receive, return to the protocol stack */
if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH)
goto out_no_ddp;
/* do we have a sw ddp context setup ? */
ddp = &fcoe->ddp[xid];
if (!ddp->sgl)
goto out_no_ddp;
/* fetch xid from hw rxd wb, which should match up the sw ctxt */
xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id);
if (ddp->xid != xid) {
dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n",
ddp->xid, xid);
goto out_put_ddp;
}
/* the same exchange has already errored out */
if (ddp->fcerr) {
dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n",
xid, ddp->fcerr, fcerr);
goto out_put_ddp;
}
/* fcoe param is valid by now with correct DDPed length */
ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param);
ddp->fcerr = fcerr;
/* header posting only, useful only for target mode and debugging */
if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) {
/* For target mode, we get header of the last packet but it
 * does not have the FCoE trailer field, i.e., CRC and EOF
 * Ordered Set since they are offloaded by the HW, so fill
 * it up correspondingly to allow the packet to pass through
 * to the upper protocol stack.
 */
u32 f_ctl = ntoh24(fh->fh_f_ctl);
if ((f_ctl & FC_FC_END_SEQ) &&
(fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
struct fcoe_crc_eof *crc = NULL;
crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
} else {
/* otherwise, drop the header only frame */
rc = 0;
goto out_no_ddp;
}
}
out_put_ddp:
/* either we got RSP or we have an error, unmap DMA in both cases */
i40e_fcoe_ddp_unmap(pf, ddp);
if (ddp->len && !ddp->fcerr) {
int pkts;
rc = ddp->len;
/* reset the context but keep the length for the caller */
i40e_fcoe_ddp_clear(ddp);
ddp->len = rc;
/* account the DDPed bytes as ring/vector rx traffic */
pkts = DIV_ROUND_UP(rc, 2048);
rx_ring->stats.bytes += rc;
rx_ring->stats.packets += pkts;
rx_ring->q_vector->rx.total_bytes += rc;
rx_ring->q_vector->rx.total_packets += pkts;
set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags);
}
out_no_ddp:
return rc;
}
/**
 * i40e_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: indicates this is a DDP request for target
 *
 * Maps the sglist for DMA, allocates a user descriptor list (udl) from
 * the per-cpu DMA pool and fills it with 4K-aligned buffer addresses
 * for the HW to DDP into, then records everything in the per-xid SW
 * DDP context.
 *
 * Returns : 1 for success and 0 for no DDP on this I/O
 **/
static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
			       struct scatterlist *sgl, unsigned int sgc,
			       int target_mode)
{
	static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_fcoe_ddp_pool *ddp_pool;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	unsigned int i, j, dmacount;
	struct i40e_fcoe_ddp *ddp;
	unsigned int firstoff = 0;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	struct scatterlist *sg;
	dma_addr_t addr = 0;
	unsigned int len;

	if (xid >= I40E_FCOE_DDP_MAX) {
		dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_NEEDS_RESTART, &pf->state)) {
		dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n",
			 xid);
		return 0;
	}

	/* a context must not already be set up for this xid */
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
			 xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	i40e_fcoe_ddp_clear(ddp);

	if (!fcoe->ddp_pool) {
		dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid);
		return 0;
	}

	/* get_cpu() pins us so the per-cpu pool stays ours until put_cpu() */
	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n",
			 sgl, sgc);
		/* dma_map_sg() returning 0 means nothing was mapped, so
		 * there is nothing to unmap; calling dma_unmap_sg() here
		 * (as the old out_noddp_unmap target did) is forbidden by
		 * the DMA API.
		 */
		goto out_noddp;
	}

	/* alloc the udl from our ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		dev_info(&pf->pdev->dev,
			 "Failed allocated ddp context, xid 0x%x\n", xid);
		goto out_noddp_unmap;
	}

	j = 0;
	ddp->len = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ddp->len += len;
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) {
				dev_info(&pf->pdev->dev,
					 "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n",
					 xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}
			/* get the offset of length of current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min_t(unsigned int, (bufflen - thisoff), len);
			/* all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/* all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len)) &&
			    ((thislen + thisoff) != bufflen))
				goto out_noddp_free;
			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have none-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	ddp->lastsize = thisoff + thislen;
	ddp->firstoff = firstoff;
	ddp->list_len = j;
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;
	ddp->xid = xid;
	if (target_mode)
		set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
	set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags);

	put_cpu();
	return 1; /* Success */

out_noddp_free:
	/* free into ddp_pool->pool (where udl was allocated from):
	 * ddp->pool is not assigned until the success path above, so
	 * it is still NULL/stale when this label is reached.
	 */
	dma_pool_free(ddp_pool->pool, ddp->udl, ddp->udp);
	i40e_fcoe_ddp_clear(ddp);
out_noddp_unmap:
	dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}
/**
 * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 **/
static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid,
			     struct scatterlist *sgl, unsigned int sgc)
{
	/* initiator mode: target_mode = 0 */
	return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
/**
 * i40e_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 **/
static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc)
{
	/* target mode: target_mode = 1 */
	return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
 * i40e_fcoe_program_ddp - programs the HW DDP related descriptors
 * @tx_ring: transmit ring for this packet
 * @skb: the packet to be sent out
 * @ddp: the SW DDP context for this exchange
 * @sof: the SOF to indicate class of service
 *
 * Determine if it is READ/WRITE command, and finds out if there is
 * a matching SW DDP context for this command. DDP is applicable
 * only in case of READ if initiator or WRITE in case of
 * responder (via checking XFER_RDY).
 *
 * Consumes three descriptors on @tx_ring: a DDP context descriptor,
 * a queue context descriptor and a filter context descriptor.
 *
 * Note: caller checks sof and ddp sw context
 *
 * Returns : none
 *
 **/
static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,
				  struct sk_buff *skb,
				  struct i40e_fcoe_ddp *ddp, u8 sof)
{
	struct i40e_fcoe_filter_context_desc *filter_desc = NULL;
	struct i40e_fcoe_queue_context_desc *queue_desc = NULL;
	struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u16 i = tx_ring->next_to_use;
	struct fc_frame_header *fh;
	u64 flags_rsvd_lanq = 0;
	bool target_mode;

	/* check if abort is still pending */
	if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) {
		dev_warn(&pf->pdev->dev,
			 "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n",
			 ddp->xid, ddp->flags);
		return;
	}

	/* set the flag to indicate this is programmed */
	if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) {
		dev_warn(&pf->pdev->dev,
			 "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n",
			 ddp->xid, ddp->flags);
		return;
	}

	/* Prepare the DDP context descriptor */
	ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;
	ddp_desc->type_cmd_foff_lsize =
				cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX |
				((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K <<
				I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) |
				((u64)ddp->firstoff <<
				I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) |
				((u64)ddp->lastsize <<
				I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT));
	ddp_desc->rsvd = cpu_to_le64(0);

	/* target mode needs last packet in the sequence */
	target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
	if (target_mode)
		ddp_desc->type_cmd_foff_lsize |=
			cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH);

	/* Prepare queue_context descriptor */
	queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
	if (i == tx_ring->count)
		i = 0;
	queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp));
	queue_desc->flen_tph = cpu_to_le64(ddp->list_len |
				((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC |
				I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) <<
				I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT));

	/* Prepare filter_context_desc */
	filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
	i++;
	if (i == tx_ring->count)
		i = 0;

	fh = (struct fc_frame_header *)skb_transport_header(skb);
	filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset));
	filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt));
	filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid <<
				I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT);

	flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP;
	flags_rsvd_lanq |= (u64)(target_mode ?
			I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP :
			I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT);

	flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ?
				I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 :
				I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3);

	flags_rsvd_lanq |= ((u64)skb->queue_mapping <<
				I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT);
	filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq);

	/* By this time, all offload related descriptors has been programmed */
	tx_ring->next_to_use = i;
}
/**
* i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort
* @tx_ring: transmit ring for this packet
* @skb: the packet associated w/ this DDP invalidation, i.e., ABTS
* @ddp: the SW DDP context for this DDP
*
* Programs the Tx context descriptor to do DDP invalidation.
**/
static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring,
struct sk_buff *skb,
struct i40e_fcoe_ddp *ddp)
{
struct i40e_tx_context_desc *context_desc;
int i;
if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags))
return;
i = tx_ring->next_to_use;
context_desc = I40E_TX_CTXTDESC(tx_ring, i);
i++;
if (i == tx_ring->count)
i = 0;
context_desc->tunneling_params = cpu_to_le32(0);
context_desc->l2tag2 = cpu_to_le16(0);
context_desc->rsvd = cpu_to_le16(0);
context_desc->type_cmd_tso_mss = cpu_to_le64(
I40E_TX_DESC_DTYPE_FCOE_CTX |
(I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL <<
I40E_TXD_CTX_QW1_CMD_SHIFT) |
(I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND <<
I40E_TXD_CTX_QW1_CMD_SHIFT));
tx_ring->next_to_use = i;
}
/**
 * i40e_fcoe_handle_ddp - check we should setup or invalidate DDP
 * @tx_ring: transmit ring for this packet
 * @skb: the packet to be sent out
 * @sof: the SOF to indicate class of service
 *
 * Determine if it is ABTS/READ/XFER_RDY, and finds out if there is
 * a matching SW DDP context for this command. DDP is applicable
 * only in case of READ if initiator or WRITE in case of
 * responder (via checking XFER_RDY). In case this is an ABTS,
 * just invalidate the context.
 **/
static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring,
				 struct sk_buff *skb, u8 sof)
{
	struct i40e_pf *pf = tx_ring->vsi->back;
	struct i40e_fcoe *fcoe = &pf->fcoe;
	struct fc_frame_header *fh;
	struct i40e_fcoe_ddp *ddp;
	u32 f_ctl;
	u16 xid;

	fh = (struct fc_frame_header *)skb_transport_header(skb);
	f_ctl = ntoh24(fh->fh_f_ctl);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_DATA_DESC:
		/* XFER_RDY as exchange responder: program DDP for a write */
		if (!(f_ctl & FC_FC_EX_CTX))
			break;
		xid = ntohs(fh->fh_rx_id);
		if (!i40e_fcoe_xid_is_valid(xid))
			break;
		ddp = &fcoe->ddp[xid];
		if ((ddp->xid == xid) &&
		    test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))
			i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
		break;
	case FC_RCTL_DD_UNSOL_CMD:
		/* exchange originator: program DDP for a READ command */
		xid = ntohs(fh->fh_ox_id);
		if (!i40e_fcoe_xid_is_valid(xid))
			break;
		ddp = &fcoe->ddp[xid];
		if ((ddp->xid == xid) &&
		    !test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))
			i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
		break;
	case FC_RCTL_BA_ABTS:
		/* exchange originator aborting: invalidate the context */
		xid = ntohs(fh->fh_ox_id);
		if (!i40e_fcoe_xid_is_valid(xid))
			break;
		ddp = &fcoe->ddp[xid];
		if ((ddp->xid == xid) &&
		    !test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))
			i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp);
		break;
	default:
		break;
	}
}
/**
 * i40e_fcoe_tso - set up FCoE TSO
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @tx_flags: collected send information
 * @hdr_len: the tso header length
 * @sof: the SOF to indicate class of service
 *
 * Programs one Tx context descriptor carrying the FC sequence offload
 * command, TSO length and MSS for this frame.
 *
 * Note must already have sof checked to be either class 2 or class 3 before
 * calling this function.
 *
 * Returns 1 to indicate sequence segmentation offload is properly setup
 * or returns 0 to indicate no tso is needed, otherwise returns error
 * code to drop the frame.
 **/
static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
			 struct sk_buff *skb,
			 u32 tx_flags, u8 *hdr_len, u8 sof)
{
	struct i40e_tx_context_desc *context_desc;
	u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
	struct fc_frame_header *fh;
	u64 cd_type_cmd_tso_mss;

	/* must match gso type as FCoE */
	if (!skb_is_gso(skb))
		return 0;

	/* is it the expected gso type for FCoE ?*/
	if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
		netdev_err(skb->dev,
			   "wrong gso type %d:expecting SKB_GSO_FCOE\n",
			   skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* header and trailer are inserted by hw */
	*hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
		   sizeof(struct fcoe_crc_eof);

	/* check sof to decide a class 2 or 3 TSO */
	if (likely(i40e_fcoe_sof_is_class3(sof)))
		cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
	else
		cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2;

	/* param field valid? check the relative-offset bit in F_CTL
	 * (byte 2 of the 24-bit field, network order)
	 */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF;

	/* fill the field values */
	cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	cd_type_cmd_tso_mss =
		((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
		((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
		((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
		((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
	tx_ring->next_to_use++;
	if (tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;

	context_desc->tunneling_params = 0;
	context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK)
					   >> I40E_TX_FLAGS_VLAN_SHIFT);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
	return 1;
}
/**
 * i40e_fcoe_tx_map - build the tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: ptr to the size of the packet header
 * @eof: the frame eof value
 *
 * Builds the td_cmd/td_offset words (CRC insertion, FCoE type, EOF,
 * MACLEN/IPLEN/L4LEN fields) and hands the frame to i40e_tx_map().
 *
 * Note, for FCoE, sof and eof are already checked
 **/
static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
			     struct sk_buff *skb,
			     struct i40e_tx_buffer *first,
			     u32 tx_flags, u8 hdr_len, u8 eof)
{
	u32 td_cmd = I40E_TX_DESC_CMD_ICRC;	/* always insert CRC */
	u32 td_offset = 0;
	u32 mac_hdr_len;

	/* MACLEN covers the L2 header plus a SW-inserted VLAN tag */
	mac_hdr_len = skb_network_offset(skb);
	if (tx_flags & I40E_TX_FLAGS_SW_VLAN)
		mac_hdr_len += sizeof(struct vlan_hdr);

	if (skb->protocol == htons(ETH_P_FCOE)) {
		/* for FCoE, maclen should exclude ether type */
		mac_hdr_len -= 2;
		/* setup type as FCoE and EOF insertion */
		td_cmd |= I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof);
		/* setup FCoELEN and FCLEN */
		td_offset |= (((sizeof(struct fcoe_hdr) + 2) >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT);
		td_offset |= ((sizeof(struct fc_frame_header) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
		/* trim to exclude trailer */
		pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof));
	}

	/* MACLEN is ether header length in words not bytes */
	td_offset |= (mac_hdr_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);
}
/**
 * i40e_fcoe_set_skb_header - adjust skb header pointers for FIP/FCoE/FC
 * @skb: the skb to be adjusted
 *
 * Checks whether this skb carries FCoE/FIP (possibly VLAN tagged) and
 * adjusts the skb header pointers correspondingly.
 *
 * Returns 0 on success (headers set for an FCoE/FIP frame), -EINVAL
 * for any other protocol.
 **/
static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);
	/* look through a VLAN tag to the encapsulated protocol */
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb);

		protocol = veth->h_vlan_encapsulated_proto;
		skb->mac_len += sizeof(struct vlan_hdr);
	}

	/* FCoE or FIP only */
	if ((protocol != htons(ETH_P_FIP)) &&
	    (protocol != htons(ETH_P_FCOE)))
		return -EINVAL;

	/* set header to L2 of FCoE/FIP */
	skb_set_network_header(skb, skb->mac_len);
	if (protocol == htons(ETH_P_FIP))
		return 0;

	/* set header to L3 of FC */
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
	return 0;
}
/**
 * i40e_fcoe_xmit_frame - transmit buffer
 * @skb: send buffer
 * @netdev: the fcoe netdev
 *
 * FCoE ndo_start_xmit: validates the frame as FCoE/FIP, prepares VLAN
 * flags, sets up TSO or DDP offload as applicable, then maps the frame
 * onto the Tx ring.
 *
 * Returns 0 if sent, else an error code
 **/
static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
					struct net_device *netdev)
{
	/* NOTE(review): derives the VSI from skb->dev rather than the
	 * @netdev argument; presumably the two are the same device here —
	 * confirm against the stack's xmit path.
	 */
	struct i40e_netdev_priv *np = netdev_priv(skb->dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
	struct i40e_tx_buffer *first;
	u32 tx_flags = 0;
	u8 hdr_len = 0;
	u8 sof = 0;
	u8 eof = 0;
	int fso;

	/* drop anything that is not FCoE/FIP */
	if (i40e_fcoe_set_skb_header(skb))
		goto out_drop;

	if (!i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* FIP is a regular L2 traffic w/o offload */
	if (skb->protocol == htons(ETH_P_FIP))
		goto out_send;

	/* check sof and eof, only supports FC Class 2 or 3 */
	if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) {
		netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof);
		goto out_drop;
	}

	/* always do FCCRC for FCoE */
	tx_flags |= I40E_TX_FLAGS_FCCRC;

	/* check we should do sequence offload */
	fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
	if (fso < 0)
		goto out_drop;
	else if (fso)
		tx_flags |= I40E_TX_FLAGS_FSO;
	else
		i40e_fcoe_handle_ddp(tx_ring, skb, sof);

out_send:
	/* send out the packet */
	i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof);

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * The FCoE netdev MTU is fixed at configuration time (FCOE_MTU), so any
 * attempt to change it is rejected.
 *
 * Returns error as operation not permitted
 *
 **/
static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
{
	netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n");
	return -EPERM;
}
/**
 * i40e_fcoe_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 *
 * Only Rx VLAN stripping is acted upon here; it is toggled on the VSI
 * to match the requested feature set.
 **/
static int i40e_fcoe_set_features(struct net_device *netdev,
				  netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	bool strip_vlan = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	if (strip_vlan)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	return 0;
}
/* net_device_ops for the FCoE pseudo netdev: shares most handlers with
 * the LAN netdev, with FCoE-specific xmit, MTU, DDP and feature ops.
 */
static const struct net_device_ops i40e_fcoe_netdev_ops = {
	.ndo_open = i40e_open,
	.ndo_stop = i40e_close,
	.ndo_get_stats64 = i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode = i40e_set_rx_mode,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = i40e_set_mac,
	.ndo_change_mtu = i40e_fcoe_change_mtu,
	.ndo_do_ioctl = i40e_ioctl,
	.ndo_tx_timeout = i40e_tx_timeout,
	.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
	.ndo_setup_tc = i40e_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = i40e_netpoll,
#endif
	.ndo_start_xmit = i40e_fcoe_xmit_frame,
	.ndo_fcoe_enable = i40e_fcoe_enable,
	.ndo_fcoe_disable = i40e_fcoe_disable,
	.ndo_fcoe_ddp_setup = i40e_fcoe_ddp_get,
	.ndo_fcoe_ddp_done = i40e_fcoe_ddp_put,
	.ndo_fcoe_ddp_target = i40e_fcoe_ddp_target,
	.ndo_set_features = i40e_fcoe_set_features,
};
/**
 * i40e_fcoe_config_netdev - prepares the netdev for the FCoE VSI
 * @netdev: the FCoE netdev to be configured
 * @vsi: pointer to the associated VSI struct
 *
 * Sets feature flags, name, MTU, SAN MAC address, MAC/multicast filters
 * and netdev ops on the FCoE pseudo netdev. No-op unless @vsi is an
 * FCoE VSI. No return value.
 **/
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_pf *pf = vsi->back;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	netdev->features = (NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER);

	netdev->vlan_features = netdev->features;
	netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_RX |
				   NETIF_F_HW_VLAN_CTAG_FILTER);
	/* advertise the largest exchange id usable for DDP */
	netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1;
	netdev->features |= NETIF_F_ALL_FCOE;
	netdev->vlan_features |= NETIF_F_ALL_FCOE;
	netdev->hw_features |= netdev->features;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

	/* NOTE(review): strlcpy takes the full destination size; passing
	 * IFNAMSIZ-1 needlessly shortens the usable buffer by one byte.
	 */
	strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
	netdev->mtu = FCOE_MTU;
	SET_NETDEV_DEV(netdev, &pf->pdev->dev);

	/* SAN MAC plus the well-known FCoE/FIP addresses */
	i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
	i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
	i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
	i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
	i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
	i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);

	/* use san mac */
	ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
	ether_addr_copy(netdev->perm_addr, hw->mac.san_addr);
	/* fcoe netdev ops */
	netdev->netdev_ops = &i40e_fcoe_netdev_ops;
}
/**
 * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
 * @pf: the pf that VSI is associated with
 *
 * Creates the FCoE VSI uplinked to the LAN VSI, unless FCoE is not
 * enabled on this PF or an FCoE VSI already exists.
 **/
void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	u16 uplink_seid;
	int v;

	if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
		return;

	BUG_ON(!pf->vsi[pf->lan_vsi]);

	/* bail out if an FCoE VSI has already been created */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		vsi = pf->vsi[v];
		if (vsi && vsi->type == I40E_VSI_FCOE) {
			dev_warn(&pf->pdev->dev,
				 "FCoE VSI already created\n");
			return;
		}
	}

	/* uplink the new VSI to the LAN VSI */
	uplink_seid = pf->vsi[pf->lan_vsi]->seid;
	vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, uplink_seid, 0);
	if (!vsi) {
		dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
		return;
	}

	dev_dbg(&pf->pdev->dev,
		"Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
		vsi->seid, vsi->id, vsi->uplink_seid, uplink_seid);
}
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
#ifndef _I40E_FCOE_H_
#define _I40E_FCOE_H_
/* FCoE HW context helper macros */
#define I40E_DDP_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i]))
#define I40E_QUEUE_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i]))
#define I40E_FILTER_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
/* receive queue descriptor filter status for FCoE */
#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3
#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */
#define I40E_RX_DESC_FLTSTAT_NODDP 0x1 /* no ddp due to error */
#define I40E_RX_DESC_FLTSTAT_DDP 0x2 /* DDPed payload, post header */
#define I40E_RX_DESC_FLTSTAT_FCPRSP 0x3 /* FCP_RSP */
/* receive queue descriptor error codes for FCoE */
#define I40E_RX_DESC_FCOE_ERROR_MASK \
(I40E_RX_DESC_ERROR_L3L4E_PROT | \
I40E_RX_DESC_ERROR_L3L4E_FC | \
I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR | \
I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN)
/* receive queue descriptor programming error */
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) \
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) \
(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \
(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \
I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT \
I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT
/* FCoE DDP related definitions */
#define I40E_FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define I40E_FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
#define I40E_FCOE_DDP_BUFFCNT_MAX 512 /* 9 bits bufcnt */
#define I40E_FCOE_DDP_PTR_ALIGN 16
#define I40E_FCOE_DDP_PTR_MAX (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t))
#define I40E_FCOE_DDP_BUF_MIN 4096
#define I40E_FCOE_DDP_MAX 2048
#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
/* supported netdev features for FCoE */
#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_FILTER)
/* DDP context flags — bit numbers used in struct i40e_fcoe_ddp.flags */
enum i40e_fcoe_ddp_flags {
	__I40E_FCOE_DDP_NONE = 1,
	__I40E_FCOE_DDP_TARGET,		/* context was set up in target mode */
	__I40E_FCOE_DDP_INITALIZED,	/* [sic] set once ddp_setup completes */
	__I40E_FCOE_DDP_PROGRAMMED,	/* HW descriptors have been written */
	__I40E_FCOE_DDP_DONE,		/* DDP completed with data received */
	__I40E_FCOE_DDP_ABORTED,	/* invalidation issued (ABTS) */
	__I40E_FCOE_DDP_UNMAPPED,	/* NOTE(review): not set in this chunk — confirm semantics in i40e_fcoe_ddp_unmap */
};
/* DDP SW context struct — one per exchange id (xid) */
struct i40e_fcoe_ddp {
	int len;			/* total DDPed length in bytes */
	u16 xid;			/* exchange id owning this context */
	u16 firstoff;			/* offset into the first buffer */
	u16 lastsize;			/* valid length of the last buffer */
	u16 list_len;			/* number of entries in udl */
	u8 fcerr;			/* FCoE error reported by the rx desc */
	u8 prerr;			/* NOTE(review): not written in this chunk — presumably programming error, confirm */
	unsigned long flags;		/* enum i40e_fcoe_ddp_flags bits */
	unsigned int sgc;		/* scatter-gather entry count */
	struct scatterlist *sgl;	/* scatter-gather list from the ULD */
	dma_addr_t udp;			/* DMA address of udl */
	u64 *udl;			/* user descriptor list: DDP buffer addresses */
	struct dma_pool *pool;		/* pool udl was allocated from */
};
/* per-cpu wrapper around the DMA pool used to allocate udl arrays */
struct i40e_fcoe_ddp_pool {
	struct dma_pool *pool;
};
/* per-PF FCoE state */
struct i40e_fcoe {
	unsigned long mode;	/* NOTE(review): usage not visible in this chunk */
	atomic_t refcnt;
	struct i40e_fcoe_ddp_pool __percpu *ddp_pool;	/* per-cpu udl pools */
	struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX];	/* DDP contexts by xid */
};
#endif /* _I40E_FCOE_H_ */
......@@ -269,7 +269,11 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
* device is munged, not just the one netdev port, so go for the full
* reset.
**/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -349,9 +353,15 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the service task.
**/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
struct rtnl_link_stats64 *stats)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_ring *tx_ring, *rx_ring;
......@@ -636,6 +646,55 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
veb->stat_offsets_loaded = true;
}
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * Reads the per-PF FCoE HW statistics registers and accumulates deltas
 * into vsi->fcoe_stats using vsi->fcoe_stats_offsets as the baseline.
 * No-op for non-FCoE VSIs.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;	/* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* per-PF index into the global FCoE stat registers */
	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	/* mark the offsets as valid baselines for the next update */
	vsi->fcoe_stat_offsets_loaded = true;
}
#endif
/**
* i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
* @pf: the corresponding PF
......@@ -1064,6 +1123,9 @@ void i40e_update_stats(struct i40e_vsi *vsi)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
i40e_update_fcoe_stats(vsi);
#endif
}
/**
......@@ -1315,7 +1377,11 @@ void i40e_del_filter(struct i40e_vsi *vsi,
*
* Returns 0 on success, negative on failure
**/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -1376,10 +1442,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*
* Setup VSI queue mapping for enabled traffic classes.
**/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc,
bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc,
bool is_add)
#endif
{
struct i40e_pf *pf = vsi->back;
u16 sections = 0;
......@@ -1425,6 +1498,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
case I40E_VSI_MAIN:
qcount = min_t(int, pf->rss_size, num_tc_qps);
break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
qcount = num_tc_qps;
break;
#endif
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
......@@ -1491,7 +1569,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_mac_filter *f, *ftmp;
......@@ -2069,8 +2151,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
*
* net_device_ops implementation for adding vlan ids
**/
#ifdef I40E_FCOE
int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -2103,8 +2190,13 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
*
* net_device_ops implementation for removing vlan ids
**/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -2236,6 +2328,9 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
i40e_fcoe_setup_ddp_resources(vsi);
#endif
return err;
}
......@@ -2255,6 +2350,9 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
i40e_fcoe_free_ddp_resources(vsi);
#endif
}
/**
......@@ -2296,6 +2394,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.qlen = ring->count;
tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* FDIR VSI tx ring can still use RS bit and writebacks */
if (vsi->type != I40E_VSI_FDIR)
......@@ -2408,6 +2509,9 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
#ifdef I40E_FCOE
rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
/* set the prefena field to 1 because the manual says to */
rx_ctx.prefena = 1;
......@@ -2492,6 +2596,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
break;
}
#ifdef I40E_FCOE
/* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
vsi->rx_hdr_len = 0;
vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072;
vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
}
#endif /* I40E_FCOE */
/* round up for the chip's needs */
vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
......@@ -3252,7 +3367,11 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
* This is used by netconsole to send skbs without having to re-enable
* interrupts. It's not called while the normal interrupt routine is executing.
**/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -4202,12 +4321,20 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
continue;
/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
* - For FCoE VSI only enable the TC configured
* as per the APP TLV
#endif
* - For all others keep them at TC0 for now
*/
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */
ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
if (ret) {
......@@ -4434,7 +4561,11 @@ void i40e_down(struct i40e_vsi *vsi)
* @netdev: net device to configure
* @tc: number of traffic classes to enable
**/
#ifdef I40E_FCOE
int i40e_setup_tc(struct net_device *netdev, u8 tc)
#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -4499,7 +4630,11 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
*
* Returns 0 on success, negative value on failure
**/
#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev)
#else
static int i40e_open(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -4635,7 +4770,11 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
*
* Returns 0, this is not allowed to fail
**/
#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
#else
static int i40e_close(struct net_device *netdev)
#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
......@@ -5050,6 +5189,9 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
switch (vsi->type) {
case I40E_VSI_MAIN:
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
#endif
if (!vsi->netdev || !vsi->netdev_registered)
break;
......@@ -5768,7 +5910,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
goto end_core_reset;
}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
ret = i40e_init_pf_fcoe(pf);
if (ret)
dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
#endif
/* do basic switch setup */
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
......@@ -6107,6 +6254,15 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
I40E_REQ_DESCRIPTOR_MULTIPLE);
break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
vsi->alloc_queue_pairs = pf->num_fcoe_qps;
vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
I40E_REQ_DESCRIPTOR_MULTIPLE);
vsi->num_q_vectors = pf->num_fcoe_msix;
break;
#endif /* I40E_FCOE */
default:
WARN_ON(1);
return -ENODATA;
......@@ -6418,6 +6574,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
* is governed by number of cpus in the system.
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
#ifdef I40E_FCOE
* - The number of FCOE qps.
#endif
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
......@@ -6430,6 +6589,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
v_budget++;
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_fcoe_msix = pf->num_fcoe_qps;
v_budget += pf->num_fcoe_msix;
}
#endif
/* Scale down if necessary, and the rings will share vectors */
v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
......@@ -6448,6 +6614,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
* of these features based on the policy and at the end disable
* the features that did not get any vectors.
*/
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
pf->num_fcoe_msix = 0;
#endif
pf->num_vmdq_msix = 0;
}
......@@ -6478,9 +6648,24 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_lan_msix = 1;
pf->num_fcoe_msix = 1;
}
#else
pf->num_lan_msix = 2;
#endif
break;
default:
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_fcoe_msix = 1;
vec--;
}
#endif
pf->num_lan_msix = min_t(int, (vec / 2),
pf->num_lan_qps);
pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
......@@ -6494,6 +6679,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
}
#ifdef I40E_FCOE
if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
return err;
}
......@@ -6577,6 +6769,9 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
err = i40e_init_msix(pf);
if (err) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
......@@ -6814,6 +7009,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
#ifdef I40E_FCOE
err = i40e_init_pf_fcoe(pf);
if (err)
dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
......@@ -7141,6 +7342,10 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_poll_controller = i40e_netpoll,
#endif
.ndo_setup_tc = i40e_setup_tc,
#ifdef I40E_FCOE
.ndo_fcoe_enable = i40e_fcoe_enable,
.ndo_fcoe_disable = i40e_fcoe_disable,
#endif
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
......@@ -7249,6 +7454,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->netdev_ops = &i40e_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
i40e_fcoe_config_netdev(netdev, vsi);
#endif
return 0;
}
......@@ -7368,7 +7576,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* should be set to zero by default.
*/
ctxt.info.switch_id = 0;
ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
/* Setup the VSI tx/rx queue map for TC0 only for now */
......@@ -7402,6 +7609,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
#ifdef I40E_FCOE
case I40E_VSI_FCOE:
ret = i40e_fcoe_vsi_init(vsi, &ctxt);
if (ret) {
dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
return ret;
}
break;
#endif /* I40E_FCOE */
default:
return -ENODEV;
}
......@@ -7760,6 +7977,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
/* setup the netdev if needed */
case I40E_VSI_MAIN:
case I40E_VSI_VMDQ2:
case I40E_VSI_FCOE:
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
......@@ -8378,6 +8596,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
int queues_left;
pf->num_lan_qps = 0;
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
#endif
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
......@@ -8393,6 +8614,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
......@@ -8407,6 +8631,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
......@@ -8422,6 +8649,22 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
}
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
if (I40E_DEFAULT_FCOE <= queues_left) {
pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
} else if (I40E_MINIMUM_FCOE <= queues_left) {
pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
} else {
pf->num_fcoe_qps = 0;
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
}
queues_left -= pf->num_fcoe_qps;
}
#endif
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (queues_left > 1) {
queues_left -= 1; /* save 1 queue for FD */
......@@ -8446,6 +8689,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
}
pf->queues_left = queues_left;
#ifdef I40E_FCOE
dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
/**
......@@ -8512,6 +8758,10 @@ static void i40e_print_features(struct i40e_pf *pf)
buf += sprintf(buf, "DCB ");
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
#ifdef I40E_FCOE
if (pf->flags & I40E_FLAG_FCOE_ENABLED)
buf += sprintf(buf, "FCOE ");
#endif
BUG_ON(buf > (string + INFO_STRING_LEN));
dev_info(&pf->pdev->dev, "%s\n", string);
......@@ -8699,6 +8949,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
if (is_valid_ether_addr(hw->mac.port_addr))
pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
if (err)
dev_info(&pdev->dev,
"(non-fatal) SAN MAC retrieval failed: %d\n", err);
if (!is_valid_ether_addr(hw->mac.san_addr)) {
dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
hw->mac.san_addr);
ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
}
dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
......@@ -8815,6 +9077,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
#ifdef I40E_FCOE
/* create FCoE interface */
i40e_fcoe_vsi_setup(pf);
#endif
/* Get the negotiated link width and speed from PCI config space */
pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
......
......@@ -78,4 +78,7 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
/* Enable the driver's FCoE offload paths whenever the kernel has FCoE
 * support configured, either built-in or as a module.  I40E_FCOE is the
 * single switch that gates every "#ifdef I40E_FCOE" section in the driver.
 */
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#define I40E_FCOE
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
#endif /* _I40E_OSDEP_H_ */
......@@ -70,6 +70,9 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
u32 reg_addr, u64 reg_val,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
......@@ -237,6 +240,9 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
#ifdef I40E_FCOE
i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
#endif
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
......
......@@ -896,6 +896,11 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
(id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}
/**
......@@ -1489,6 +1494,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
#ifdef I40E_FCOE
if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
dev_kfree_skb_any(skb);
goto next_desc;
}
#endif
i40e_receive_skb(rx_ring, skb, vlan_tag);
rx_ring->netdev->last_rx = jiffies;
......@@ -1719,9 +1730,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* Returns error code indicate the frame should be dropped upon error and the
* otherwise returns 0 to indicate the flags has been set properly.
**/
#ifdef I40E_FCOE
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
#else
static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
#endif
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
......@@ -1743,9 +1760,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
}
/* Insert 802.1p priority into VLAN header */
if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
(skb->priority != TC_PRIO_CONTROL))) {
if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
(skb->priority != TC_PRIO_CONTROL)) {
tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
tx_flags |= (skb->priority & 0x7) <<
I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
......@@ -2018,9 +2034,15 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
......@@ -2197,7 +2219,11 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
*
* Returns 0 if stop is not needed
**/
#ifdef I40E_FCOE
int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
return 0;
......@@ -2213,8 +2239,13 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* there is not enough descriptors available in this ring since we need at least
* one descriptor.
**/
#ifdef I40E_FCOE
int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
#else
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
#endif
{
unsigned int f;
int count = 0;
......
......@@ -290,4 +290,13 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
#ifdef I40E_FCOE
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
#endif /* _I40E_TXRX_H_ */
......@@ -1051,6 +1051,25 @@ struct i40e_eth_stats {
u64 tx_errors; /* tepc */
};
#ifdef I40E_FCOE
/* Statistics collected per function for FCoE.  Each field's trailing
 * comment names the hardware statistics register it is read from.
 */
struct i40e_fcoe_stats {
u64 rx_fcoe_packets; /* fcoeprc */
u64 rx_fcoe_dwords; /* fcoedwrc */
u64 rx_fcoe_dropped; /* fcoerpdc */
u64 tx_fcoe_packets; /* fcoeptc */
u64 tx_fcoe_dwords; /* fcoedwtc */
u64 fcoe_bad_fccrc; /* fcoecrc */
u64 fcoe_last_error; /* fcoelast */
u64 fcoe_ddp_count; /* fcoeddpc */
};
/* offset to per function FCoE statistics block: VF blocks start at 0,
 * PF blocks start at 128, one block per function up to I40E_MAX_PF
 */
#define I40E_FCOE_VF_STAT_OFFSET 0
#define I40E_FCOE_PF_STAT_OFFSET 128
#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
#endif
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
......@@ -1131,6 +1150,125 @@ struct i40e_hw_port_stats {
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
#ifdef I40E_FCOE
/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
/* Values for the CMD field of the FCoE Tx context descriptor.  The
 * entries marked "4 BITS" are mutually-exclusive opcodes sharing a
 * 4-bit subfield; the remaining entries (0x10..0x80) are individual
 * flag bits that are OR'd on top of the opcode.
 */
enum i40E_fcoe_tx_ctx_desc_cmd_bits {
I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
};
/* FCoE DDP Context descriptor */
struct i40e_fcoe_ddp_context_desc {
__le64 rsvd;
/* qword1 packs: DTYPE (0:3), CMD (4:7), FOFF (16:29), LSIZE (32:45),
 * per the shift/mask definitions below
 */
__le64 type_cmd_foff_lsize;
};
#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
/* Values for the CMD field of the DDP context descriptor: the BSIZE
 * encodings occupy a 2-bit subfield selecting the DDP buffer size;
 * DIFENA and LASTSEQH are single flag bits.
 */
enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
};
/* FOFF: 14-bit frame offset at qword1 bits 16:29 */
#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
/* LSIZE: 14-bit last-sequence size at qword1 bits 32:45 */
#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
/* FCoE DDP/DWO Queue Context descriptor */
struct i40e_fcoe_queue_context_desc {
__le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
__le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
};
/* DMAINDX: 12-bit DMA index at qword0 bits 0:11 */
#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
/* FBASE: 52-bit filter base at qword0 bits 12:63 */
#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
/* FLEN: 13-bit filter length at qword1 bits 0:12 */
#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
/* TPH: 3-bit TPH-enable field at qword1 bits 13:15.
 * Fix: the mask must be shifted by the TPH shift (13), not the FLEN
 * shift (0) — the original copy-paste left the mask overlapping FLEN.
 */
#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)
/* Flag bits for the TPH field: enable TLP Processing Hints for
 * descriptor reads and/or data writes.
 */
enum i40e_fcoe_queue_ctx_desc_tph_bits {
I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
};
/* RECIPE: 2-bit recipe selector at qword1 bits 30:31 */
#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
/* FCoE DDP/DWO Filter Context descriptor */
struct i40e_fcoe_filter_context_desc {
__le32 param;
__le16 seqn;
/* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
__le16 rsvd_dmaindx;
/* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
__le64 flags_rsvd_lanq;
};
/* DMAINDX: 12-bit DMA index within the 16-bit rsvd_dmaindx field,
 * starting at bit 4 (mask is plain int — applied to the 16-bit field,
 * not the full qword)
 */
#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
/* Flag bits for the FLAGS field: context type (DDP vs DWO), ENODE role
 * (initiator vs responder), and FC service class (2 vs 3).  The _DDP,
 * _ENODE_INIT and _FC_CLASS2 entries are the zero defaults of each pair.
 */
enum i40e_fcoe_filter_ctx_desc_flags_bits {
I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
};
/* FLAGS: 8-bit flag field at qword1 bits 0:7 */
#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
/* PCTYPE: 6-bit packet classifier type at qword1 bits 8:13 */
#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
/* LANQINDX: 11-bit LAN queue index at qword1 bits 53:63 */
#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
#endif /* I40E_FCOE */
enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2,
......
......@@ -669,7 +669,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
*/
for (i = 0; i < 100; i++) {
/* vf reset requires driver to first reset the
* vf & than poll the status register to make sure
* vf and then poll the status register to make sure
* that the requested op was completed
* successfully
*/
......@@ -1005,7 +1005,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
......@@ -1025,7 +1025,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
......@@ -1163,8 +1163,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
(struct i40e_virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi *vsi;
bool allmulti = false;
bool promisc = false;
i40e_status aq_ret;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
......@@ -1174,17 +1174,10 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
promisc = true;
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
promisc, NULL);
if (aq_ret)
goto error_param;
vsi = pf->vsi[info->vsi_id];
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
allmulti, NULL);
error_param:
......@@ -1935,15 +1928,17 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf = pf->vf;
int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
int i;
for (i = 0; i < pf->num_alloc_vfs; i++) {
/* Ignore return value on purpose - a given VF may fail, but
* we need to keep going and send to all of them
*/
i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
vf++;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
}
}
......@@ -1959,6 +1954,7 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf = pf->vf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
int i;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
......@@ -1973,10 +1969,11 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
ls->link_info & I40E_AQ_LINK_UP;
pfe.event_data.link_event.link_speed = ls->link_speed;
}
i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe),
NULL);
vf++;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
}
}
......@@ -2005,10 +2002,11 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
struct i40e_virtchnl_pf_event pfe;
int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (u8 *)&pfe,
sizeof(struct i40e_virtchnl_pf_event), NULL);
}
......@@ -2345,6 +2343,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
struct i40e_virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
int abs_vf_id;
int ret = 0;
/* validate the request */
......@@ -2355,6 +2354,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
}
vf = &pf->vf[vf_id];
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
......@@ -2384,7 +2384,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
goto error_out;
}
/* Notify the VF of its new link state */
i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
......
......@@ -193,7 +193,7 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
}
/**
* i40evf_get_drvinto - Get driver info
* i40evf_get_drvinfo - Get driver info
* @netdev: network interface device structure
* @drvinfo: ethool driver info structure
*
......
......@@ -527,7 +527,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
sprintf(adapter->misc_vector_name, "i40evf:mbx");
snprintf(adapter->misc_vector_name,
sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx");
err = request_irq(adapter->msix_entries[0].vector,
&i40evf_msix_aq, 0,
adapter->misc_vector_name, netdev);
......@@ -1297,12 +1298,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
struct i40evf_adapter,
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
uint32_t rstat_val;
if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((rstat_val == I40E_VFR_VFACTIVE) ||
(rstat_val == I40E_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
......@@ -1328,8 +1333,11 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
/* check for reset */
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
(rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED)) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
......@@ -1395,6 +1403,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
watchdog_done:
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
return;
if (adapter->aq_required)
mod_timer(&adapter->watchdog_timer,
jiffies + msecs_to_jiffies(20));
......@@ -1495,7 +1505,8 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (rstat_val != I40E_VFR_VFACTIVE)
if ((rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED))
break;
else
msleep(I40EVF_RESET_WAIT_MS);
......@@ -1509,12 +1520,16 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (rstat_val == I40E_VFR_VFACTIVE)
if ((rstat_val == I40E_VFR_VFACTIVE) ||
(rstat_val == I40E_VFR_COMPLETED))
break;
else
msleep(I40EVF_RESET_WAIT_MS);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *f, *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
/* reset never finished */
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
rstat_val);
......@@ -1527,9 +1542,23 @@ static void i40evf_reset_task(struct work_struct *work)
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
}
/* Delete all of the filters, both MAC and VLAN. */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
list) {
list_del(&f->list);
kfree(f);
}
list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
list) {
list_del(&fv->list);
kfree(fv);
}
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
i40evf_free_q_vectors(adapter);
kfree(adapter->vf_res);
i40evf_shutdown_adminq(hw);
adapter->netdev->flags &= ~IFF_UP;
......@@ -1946,8 +1975,10 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
int i;
for (i = 0; i < 100; i++) {
rstat = rd32(hw, I40E_VFGEN_RSTAT);
if (rstat == I40E_VFR_VFACTIVE)
rstat = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((rstat == I40E_VFR_VFACTIVE) ||
(rstat == I40E_VFR_COMPLETED))
return 0;
udelay(10);
}
......@@ -2106,8 +2137,6 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (NULL == f)
goto err_sw_init;
......@@ -2289,6 +2318,9 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
......@@ -2400,6 +2432,7 @@ static void i40evf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
cancel_delayed_work_sync(&adapter->init_task);
......@@ -2415,6 +2448,7 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_misc_irq_disable(adapter);
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_q_vectors(adapter);
}
if (adapter->watchdog_timer.function)
......@@ -2430,6 +2464,13 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_free_queues(adapter);
kfree(adapter->vf_res);
/* If we got removed before an up/down sequence, we've got a filter
* hanging out there that we need to get rid of.
*/
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
kfree(f);
}
free_netdev(netdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment