Commit d4f38620 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-06-11

This series contains updates to igb, i40e and i40evf.

Todd makes a change to igb to un-hide invariant returns by getting rid of
the E1000_SUCCESS define and converting those returns to return 0.
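
For illustration, a minimal sketch of the pattern (the function names here are hypothetical, not taken from the driver):

/* Before: a macro that always expanded to 0 hid the invariant. */
#define E1000_SUCCESS 0

static int example_check_old(void)
{
	return E1000_SUCCESS;	/* reads as if it could be an error code */
}

/* After: the invariant success return is visible at the call site. */
static int example_check_new(void)
{
	return 0;
}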

Jacob separates the hardware logic from the set function so that it can be
re-used during a PTP reset in igb.  This enables the reset to restore the
last known timestamp mode rather than resetting the value.

Ashish implements context flags for headwb and headwb_addr so that we
do not have to keep them always enabled.
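
As a rough standalone sketch (plain C; the struct is a simplified stand-in
for the queue-config message, with field names mirroring the
i40evf_configure_queues() and i40e_config_vsi_tx_queue() hunks further below):

#include <stdint.h>

/* Simplified view of the Tx queue configuration a VF sends to the PF;
 * only the two new head write-back fields matter for this sketch.
 */
struct txq_cfg {
	uint16_t queue_id;
	uint16_t ring_len;        /* number of descriptors */
	uint64_t dma_ring_addr;   /* DMA address of the descriptor ring */
	uint8_t  headwb_enabled;  /* new: explicit opt-in flag */
	uint64_t dma_headwb_addr; /* new: where HW writes the head index */
};

#define TX_DESC_SIZE 16u	/* assumed sizeof(struct i40e_tx_desc) */

/* Opt in to head write-back and place the write-back area right after
 * the descriptor ring, as the i40evf hunk below does.
 */
static void txq_enable_headwb(struct txq_cfg *cfg)
{
	cfg->headwb_enabled = 1;
	cfg->dma_headwb_addr = cfg->dma_ring_addr +
			       (uint64_t)cfg->ring_len * TX_DESC_SIZE;
}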

Shannon updates the admin queue API for the new firmware, which adds
set_pf_context and nvm_config_read/write, replaces set_phy_reset with
set_phy_debug, and removes the debug_read/write_reg_sg opcodes.  He also
cleans up the driver to use the stored base_queue value: there is no need
to read the PCI register for the PF's base queue on every single transmit
queue enable and disable, since the value is already stored from reading
the capability features at startup.
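
A minimal sketch of that cleanup (plain C; the structs are simplified
stand-ins, with names following the i40e_pre_tx_queue_cfg() hunk below):

#include <stdint.h>

struct func_caps {
	uint32_t base_queue;	/* PF's first absolute queue, from capabilities */
};

struct hw_info {
	struct func_caps func_caps;
};

/* The old path read the PFLAN_QALLOC register on every queue enable or
 * disable to find the PF's first queue; the stored capability value is
 * enough.
 */
static uint32_t abs_tx_queue_index(const struct hw_info *hw, uint32_t queue)
{
	return hw->func_caps.base_queue + queue;
}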

Anjali changes the notion of source and destination for FD_SB in ethtool
to align i40e with other drivers, and adds flow director statistics to
the PF stats.  She also fixes a bug in ethtool for the flow director drop
packet filter: the drop action comes down as a ring_cookie value, so it
is now accepted as a special value that configures the destination
control to drop the packet.
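
A standalone sketch of the drop-filter handling (plain C; RX_CLS_FLOW_DISC
is ethtool's "discard" cookie, while the dest_ctl constants here are
illustrative stand-ins for the driver defines seen in the
i40e_get_ethtool_fdir_entry()/i40e_add_fdir_ethtool() hunks below):

#include <stdbool.h>
#include <stdint.h>

#define RX_CLS_FLOW_DISC	0xffffffffffffffffULL	/* "drop" cookie (all ones) */
#define DEST_CTL_TO_QUEUE	1
#define DEST_CTL_DROP_PACKET	2

/* Accept the special drop cookie instead of rejecting it as an
 * out-of-range queue index.
 */
static bool fdir_dest_from_cookie(uint64_t ring_cookie, uint32_t num_queues,
				  uint32_t *dest_ctl, uint32_t *q_index)
{
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		*dest_ctl = DEST_CTL_DROP_PACKET;
		return true;
	}
	if (ring_cookie >= num_queues)
		return false;
	*dest_ctl = DEST_CTL_TO_QUEUE;
	*q_index = (uint32_t)ring_cookie;
	return true;
}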

Mitch fixes the i40evf to keep the driver from going down when it is
already in a down state, which prevents a CPU soft lockup in
napi_disable().  He also changes the i40evf to check the admin queue
error bits, since the firmware can indicate admin queue error states to
the driver via bits in the length registers.
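
A simplified sketch of that admin queue health check (plain C; the bit
positions here are illustrative, not the real register layout, and the
flow mirrors the i40evf_adminq_task() hunk below):

#include <stdint.h>
#include <stdio.h>

#define ARQ_VF_ERR	(1u << 28)	/* illustrative bit positions */
#define ARQ_OVERFLOW	(1u << 29)
#define ARQ_CRITICAL	(1u << 30)

/* Report and clear any error bits the firmware set in the receive queue
 * length register; the caller writes the value back only if a bit was
 * cleared.
 */
static uint32_t check_arq_errors(uint32_t len_reg)
{
	uint32_t val = len_reg;

	if (val & ARQ_VF_ERR) {
		fprintf(stderr, "ARQ VF Error detected\n");
		val &= ~ARQ_VF_ERR;
	}
	if (val & ARQ_OVERFLOW) {
		fprintf(stderr, "ARQ Overflow Error detected\n");
		val &= ~ARQ_OVERFLOW;
	}
	if (val & ARQ_CRITICAL) {
		fprintf(stderr, "ARQ Critical Error detected\n");
		val &= ~ARQ_CRITICAL;
	}
	return val;
}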

Neerav separates the DCB capability and enabled flags.  Currently, if the
firmware reports DCB capability, the driver enables the
I40E_FLAG_DCB_ENABLED flag, and when this flag is enabled the driver
inserts a tag when transmitting a packet from the port even if there are
no DCB traffic classes configured at the port.  With the additional
I40E_FLAG_DCB_CAPABLE flag, the capable flag is set whenever the DCB
capability is present, while the existing enabled flag is set only when
more than one traffic class is configured at the port.
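
A standalone sketch of the flag split (plain C; the DCB_CAPABLE bit
matches the i40e.h hunk below, the DCB_ENABLED bit position is
illustrative):

#include <stdint.h>

#define FLAG_DCB_CAPABLE	(1ULL << 29)
#define FLAG_DCB_ENABLED	(1ULL << 20)	/* illustrative position */

/* Capability comes from the firmware; tag insertion is only enabled
 * when the port actually has more than one traffic class.
 */
static uint64_t update_dcb_flags(uint64_t flags, int fw_reports_dcb,
				 unsigned int num_tc)
{
	if (!fw_reports_dcb)
		return flags & ~(FLAG_DCB_CAPABLE | FLAG_DCB_ENABLED);

	flags |= FLAG_DCB_CAPABLE;
	if (num_tc > 1)
		flags |= FLAG_DCB_ENABLED;
	else
		flags &= ~FLAG_DCB_ENABLED;
	return flags;
}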

Greg fixes the i40e driver to not automatically accept tagged packets by
default, so that the system must request a VLAN tag packet filter to
receive packets with that tag.  Greg also converts i40e to use the
in-kernel ether_addr_copy() instead of memcpy().
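
For reference, a trivial stand-in for the helper being adopted (the real
kernel ether_addr_copy() may use 16-bit copies on some architectures):

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static inline void ether_addr_copy(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, ETH_ALEN);	/* fixed 6-byte MAC copy */
}

/* conversion pattern used throughout the series:
 *   before:  memcpy(f->macaddr, macaddr, ETH_ALEN);
 *   after:   ether_addr_copy(f->macaddr, macaddr);
 */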

Jesse removes the FTYPE field from the receive descriptor to match the
hardware implementation.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 813ebbbf f8320902
@@ -154,11 +154,23 @@ struct i40e_lump_tracking {
 #define I40E_FDIR_BUFFER_FULL_MARGIN 10
 #define I40E_FDIR_BUFFER_HEAD_ROOM 200
+enum i40e_fd_stat_idx {
+	I40E_FD_STAT_ATR,
+	I40E_FD_STAT_SB,
+	I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id) \
+			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
 struct i40e_fdir_filter {
 	struct hlist_node fdir_node;
 	/* filter ipnut set */
 	u8 flow_type;
 	u8 ip4_proto;
+	/* TX packet view of src and dst */
 	__be32 dst_ip[4];
 	__be32 src_ip[4];
 	__be16 src_port;
@@ -222,6 +234,8 @@ struct i40e_pf {
 	struct hlist_head fdir_filter_list;
 	u16 fdir_pf_active_filters;
+	u16 fd_sb_cnt_idx;
+	u16 fd_atr_cnt_idx;
 #ifdef CONFIG_I40E_VXLAN
 	__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -263,6 +277,7 @@ struct i40e_pf {
 #ifdef CONFIG_I40E_VXLAN
 #define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
 #endif
+#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
 	/* tracks features that get auto disabled by errors */
 	u64 auto_disable_flags;
...
@@ -34,7 +34,7 @@
  */
 #define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
 struct i40e_aq_desc {
 	__le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_version = 0x0001,
 	i40e_aqc_opc_driver_version = 0x0002,
 	i40e_aqc_opc_queue_shutdown = 0x0003,
+	i40e_aqc_opc_set_pf_context = 0x0004,
 	/* resource ownership */
 	i40e_aqc_opc_request_resource = 0x0008,
@@ -222,13 +223,15 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_partner_advt = 0x0616,
 	i40e_aqc_opc_set_lb_modes = 0x0618,
 	i40e_aqc_opc_get_phy_wol_caps = 0x0621,
-	i40e_aqc_opc_set_phy_reset = 0x0622,
+	i40e_aqc_opc_set_phy_debug = 0x0622,
 	i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
 	/* NVM commands */
 	i40e_aqc_opc_nvm_read = 0x0701,
 	i40e_aqc_opc_nvm_erase = 0x0702,
 	i40e_aqc_opc_nvm_update = 0x0703,
+	i40e_aqc_opc_nvm_config_read = 0x0704,
+	i40e_aqc_opc_nvm_config_write = 0x0705,
 	/* virtualization commands */
 	i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +273,6 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_debug_set_mode = 0xFF01,
 	i40e_aqc_opc_debug_read_reg = 0xFF03,
 	i40e_aqc_opc_debug_write_reg = 0xFF04,
-	i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
-	i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
 	i40e_aqc_opc_debug_modify_reg = 0xFF07,
 	i40e_aqc_opc_debug_dump_internals = 0xFF08,
 	i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -339,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+	u8 pf_id;
+	u8 reserved[15];
+};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
@@ -1404,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
 struct i40e_aqc_configure_switching_comp_ets_data {
 	u8 reserved[4];
 	u8 tc_valid_bits;
-	u8 reserved1;
+	u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
 	u8 tc_strict_priority_flags;
-	u8 reserved2[17];
+	u8 reserved1[17];
 	u8 tc_bw_share_credits[8];
-	u8 reserved3[96];
+	u8 reserved2[96];
 };
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1525,6 +1535,8 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_XLPPI = 0x9,
 	I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
 	I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+	I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+	I40E_PHY_TYPE_40GBASE_AOC = 0xD,
 	I40E_PHY_TYPE_100BASE_TX = 0x11,
 	I40E_PHY_TYPE_1000BASE_T = 0x12,
 	I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1535,7 +1547,10 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
 	I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
 	I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
-	I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+	I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+	I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+	I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+	I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
 	I40E_PHY_TYPE_MAX
 };
@@ -1679,6 +1694,7 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_LINK_TX_ACTIVE 0x00
 #define I40E_AQ_LINK_TX_DRAINED 0x01
 #define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
 	u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
 	__le16 max_frame_size;
 	u8 config;
@@ -1730,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
-	u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+	u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
 	u8 reserved[15];
 };
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 enum i40e_aq_phy_reg_type {
 	I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1762,6 +1785,47 @@ struct i40e_aqc_nvm_update {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+	__le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+	__le16 element_count;
+	__le16 element_id; /* Feature/field ID */
+	u8 reserved[2];
+	__le32 address_high;
+	__le32 address_low;
+};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+	__le16 cmd_flags;
+	__le16 element_count;
+	u8 reserved[4];
+	__le32 address_high;
+	__le32 address_low;
+};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+struct i40e_aqc_nvm_config_data_feature {
+	__le16 feature_id;
+	__le16 instance_id;
+	__le16 feature_options;
+	__le16 feature_selection;
+};
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+	__le16 field_id;
+	__le16 instance_id;
+	__le16 field_options;
+	__le16 field_value;
+};
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
...
@@ -665,10 +665,9 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
  **/
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 {
-	u32 reg_val = rd32(hw, I40E_PFLAN_QALLOC);
-	u32 first_queue = (reg_val & I40E_PFLAN_QALLOC_FIRSTQ_MASK);
-	u32 abs_queue_idx = first_queue + queue;
+	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
 	u32 reg_block = 0;
+	u32 reg_val;
 	if (abs_queue_idx >= 128)
 		reg_block = abs_queue_idx / 128;
...
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
 	struct net_device *dev = vsi->netdev;
 	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
-	/* DCB not enabled */
-	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+	/* Not DCB capable */
+	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
 		return;
 	/* Do not setup DCB NL ops for MFP mode */
...
@@ -145,6 +145,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 	/* LPI stats */
 	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
 	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -1249,11 +1252,18 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
 		fsp->m_u.usr_ip4_spec.proto = 0;
 	}
-	fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
-	fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
-	fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
-	fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
-	fsp->ring_cookie = rule->q_index;
+	/* Reverse the src and dest notion, since the HW views them from
+	 * Tx perspective where as the user expects it from Rx filter view.
+	 */
+	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	else
+		fsp->ring_cookie = rule->q_index;
 	return 0;
 }
@@ -1557,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 		return -EINVAL;
 	}
-	if (fsp->ring_cookie >= vsi->num_queue_pairs)
+	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+	    (fsp->ring_cookie >= vsi->num_queue_pairs))
 		return -EINVAL;
 	input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1578,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
 	input->pctype = 0;
 	input->dest_vsi = vsi->id;
 	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-	input->cnt_index = 0;
+	input->cnt_index = pf->fd_sb_cnt_idx;
 	input->flow_type = fsp->flow_type;
 	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
-	input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
-	input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
-	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
-	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+	/* Reverse the src and dest notion, since the HW expects them to be from
+	 * Tx perspective where as the input from user is from Rx filter view.
+	 */
+	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+	input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
 	ret = i40e_add_del_fdir(vsi, input, true);
 	if (ret)
...
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 7
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	__stringify(DRV_VERSION_MINOR) "." \
 	__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1013,6 +1013,14 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
 			   pf->stat_offsets_loaded,
 			   &osd->rx_jabber, &nsd->rx_jabber);
+	/* FDIR stats */
+	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+			   pf->stat_offsets_loaded,
+			   &osd->fd_atr_match, &nsd->fd_atr_match);
+	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+			   pf->stat_offsets_loaded,
+			   &osd->fd_sb_match, &nsd->fd_sb_match);
 	val = rd32(hw, I40E_PRTPM_EEE_STAT);
 	nsd->tx_lpi_status =
 		(val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
@@ -1153,6 +1161,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 			struct i40e_mac_filter, list);
 }
+/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+	struct i40e_aqc_remove_macvlan_element_data element;
+	struct i40e_pf *pf = vsi->back;
+	i40e_status aq_ret;
+	/* Only appropriate for the PF main VSI */
+	if (vsi->type != I40E_VSI_MAIN)
+		return;
+	ether_addr_copy(element.mac_addr, macaddr);
+	element.vlan_tag = 0;
+	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+	if (aq_ret)
+		dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+}
 /**
  * i40e_add_filter - Add a mac/vlan filter to the VSI
  * @vsi: the VSI to be searched
@@ -1178,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 		if (!f)
 			goto add_filter_out;
-		memcpy(f->macaddr, macaddr, ETH_ALEN);
+		ether_addr_copy(f->macaddr, macaddr);
 		f->vlan = vlan;
 		f->changed = true;
@@ -1302,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 			return -EADDRNOTAVAIL;
 		}
-		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+		ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
 	}
 	/* In order to be sure to not drop any packets, add the new address
@@ -1316,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
 	i40e_sync_vsi_filters(vsi);
-	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	return 0;
 }
@@ -1573,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			cmd_flags = 0;
 			/* add to delete list */
-			memcpy(del_list[num_del].mac_addr,
-			       f->macaddr, ETH_ALEN);
+			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
 			del_list[num_del].vlan_tag =
 				cpu_to_le16((u16)(f->vlan ==
 					    I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1639,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 			cmd_flags = 0;
 			/* add to add array */
-			memcpy(add_list[num_add].mac_addr,
-			       f->macaddr, ETH_ALEN);
+			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
 			add_list[num_add].vlan_tag =
 				cpu_to_le16(
 				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -4130,7 +4160,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
 			/* When status is not DISABLED then DCBX in FW */
 			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
 				       DCB_CAP_DCBX_VER_IEEE;
-			pf->flags |= I40E_FLAG_DCB_ENABLED;
+			pf->flags |= I40E_FLAG_DCB_CAPABLE;
+			/* Enable DCB tagging only when more than one TC */
+			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+				pf->flags |= I40E_FLAG_DCB_ENABLED;
 		}
 	} else {
 		dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
@@ -4685,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	int ret = 0;
 	u8 type;
+	/* Not DCB capable or capability disabled */
+	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+		return ret;
 	/* Ignore if event is not for Nearest Bridge */
 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4726,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 	if (!need_reconfig)
 		goto exit;
+	/* Enable DCB tagging only when more than one TC */
+	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+		pf->flags |= I40E_FLAG_DCB_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
 	/* Reconfiguration needed quiesce all VSIs */
 	i40e_pf_quiesce_all_vsi(pf);
@@ -6365,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 		if (err) {
 			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
 				       I40E_FLAG_RSS_ENABLED |
-				       I40E_FLAG_DCB_ENABLED |
+				       I40E_FLAG_DCB_CAPABLE |
 				       I40E_FLAG_SRIOV_ENABLED |
 				       I40E_FLAG_FD_SB_ENABLED |
 				       I40E_FLAG_FD_ATR_ENABLED |
@@ -6568,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+		/* Setup a counter for fd_atr per pf */
+		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
 		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+			/* Setup a counter for fd_sb per pf */
+			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
 		} else {
 			dev_info(&pf->pdev->dev,
 				 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6965,7 +7013,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	if (vsi->type == I40E_VSI_MAIN) {
 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
-		memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+		ether_addr_copy(mac_addr, hw->mac.perm_addr);
+		/* The following two steps are necessary to prevent reception
+		 * of tagged packets - by default the NVM loads a MAC-VLAN
+		 * filter that will accept any tagged packet. This is to
+		 * prevent that during normal operations until a specific
+		 * VLAN tag filter has been set.
+		 */
+		i40e_rm_default_mac_filter(vsi, mac_addr);
+		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
 	} else {
 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6975,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	}
 	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
-	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
-	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+	ether_addr_copy(netdev->dev_addr, mac_addr);
+	ether_addr_copy(netdev->perm_addr, mac_addr);
 	/* vlan gets same features (except vlan offload)
 	 * after any tweaks for specific VSI types
 	 */
@@ -8187,13 +8243,13 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
 			       I40E_FLAG_FD_SB_ENABLED |
 			       I40E_FLAG_FD_ATR_ENABLED |
-			       I40E_FLAG_DCB_ENABLED |
+			       I40E_FLAG_DCB_CAPABLE |
 			       I40E_FLAG_SRIOV_ENABLED |
 			       I40E_FLAG_VMDQ_ENABLED);
 	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
 				  I40E_FLAG_FD_SB_ENABLED |
 				  I40E_FLAG_FD_ATR_ENABLED |
-				  I40E_FLAG_DCB_ENABLED))) {
+				  I40E_FLAG_DCB_CAPABLE))) {
 		/* one qp for PF */
 		pf->rss_size = pf->num_lan_qps = 1;
 		queues_left -= pf->num_lan_qps;
@@ -8205,9 +8261,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 			       I40E_FLAG_VMDQ_ENABLED);
 	} else {
 		/* Not enough queues for all TCs */
-		if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
 		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
-			pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
 			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
 		}
 		pf->num_lan_qps = pf->rss_size_max;
@@ -8300,7 +8356,7 @@ static void i40e_print_features(struct i40e_pf *pf)
 		buf += sprintf(buf, "FD_SB ");
 		buf += sprintf(buf, "NTUPLE ");
 	}
-	if (pf->flags & I40E_FLAG_DCB_ENABLED)
+	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
 		buf += sprintf(buf, "DCB ");
 	if (pf->flags & I40E_FLAG_PTP)
 		buf += sprintf(buf, "PTP ");
@@ -8478,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_mac_addr;
 	}
 	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
-	memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
 	pci_set_drvdata(pdev, pf);
 	pci_save_state(pdev);
@@ -8486,7 +8542,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = i40e_init_pf_dcb(pf);
 	if (err) {
 		dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
-		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
 		/* Continue without DCB enabled */
 	}
 #endif /* CONFIG_I40E_DCB */
...
@@ -70,7 +70,7 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
 				u16 *fw_major_version, u16 *fw_minor_version,
 				u16 *api_major_version, u16 *api_minor_version,
 				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
 				struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
 				struct i40e_asq_cmd_details *cmd_details);
...
@@ -121,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
 		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
 		dcc |= ((u32)fdir_data->cnt_index <<
 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 	}
 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -1695,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+	dtype_cmd |=
+		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+		I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
 }
...
@@ -663,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
 	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
 	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
 	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
-	I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
 	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
 	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
 	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -1024,6 +1023,9 @@ struct i40e_hw_port_stats {
 	u64 tx_size_big; /* ptc9522 */
 	u64 mac_short_packet_dropped; /* mspdc */
 	u64 checksum_error; /* xec */
+	/* flow director stats */
+	u64 fd_atr_match;
+	u64 fd_sb_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
...
@@ -248,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
 	tx_ctx.qlen = info->ring_len;
 	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
 	tx_ctx.rdylist_act = 0;
-	tx_ctx.head_wb_ena = 1;
-	tx_ctx.head_wb_addr = info->dma_ring_addr +
-			      (info->ring_len * sizeof(struct i40e_tx_desc));
+	tx_ctx.head_wb_ena = info->headwb_enabled;
+	tx_ctx.head_wb_addr = info->dma_headwb_addr;
 	/* clear the context in the HMC */
 	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -2076,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 		ret = -EIO;
 		goto error_param;
 	}
-	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+	ether_addr_copy(vf->default_lan_addr.addr, mac);
 	vf->pf_set_mac = true;
 	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
 	ret = 0;
...
@@ -34,7 +34,7 @@
  */
 #define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
 #define I40E_FW_API_VERSION_A0_MINOR 0x0000
 struct i40e_aq_desc {
@@ -124,6 +124,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_version = 0x0001,
 	i40e_aqc_opc_driver_version = 0x0002,
 	i40e_aqc_opc_queue_shutdown = 0x0003,
+	i40e_aqc_opc_set_pf_context = 0x0004,
 	/* resource ownership */
 	i40e_aqc_opc_request_resource = 0x0008,
@@ -223,13 +224,15 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_get_partner_advt = 0x0616,
 	i40e_aqc_opc_set_lb_modes = 0x0618,
 	i40e_aqc_opc_get_phy_wol_caps = 0x0621,
-	i40e_aqc_opc_set_phy_reset = 0x0622,
+	i40e_aqc_opc_set_phy_debug = 0x0622,
 	i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
 	/* NVM commands */
 	i40e_aqc_opc_nvm_read = 0x0701,
 	i40e_aqc_opc_nvm_erase = 0x0702,
 	i40e_aqc_opc_nvm_update = 0x0703,
+	i40e_aqc_opc_nvm_config_read = 0x0704,
+	i40e_aqc_opc_nvm_config_write = 0x0705,
 	/* virtualization commands */
 	i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -271,8 +274,6 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_debug_set_mode = 0xFF01,
 	i40e_aqc_opc_debug_read_reg = 0xFF03,
 	i40e_aqc_opc_debug_write_reg = 0xFF04,
-	i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
-	i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
 	i40e_aqc_opc_debug_modify_reg = 0xFF07,
 	i40e_aqc_opc_debug_dump_internals = 0xFF08,
 	i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -340,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+	u8 pf_id;
+	u8 reserved[15];
+};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
@@ -1408,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
 struct i40e_aqc_configure_switching_comp_ets_data {
 	u8 reserved[4];
 	u8 tc_valid_bits;
-	u8 reserved1;
+	u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
 	u8 tc_strict_priority_flags;
-	u8 reserved2[17];
+	u8 reserved1[17];
 	u8 tc_bw_share_credits[8];
-	u8 reserved3[96];
+	u8 reserved2[96];
 };
 /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1529,6 +1539,8 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_XLPPI = 0x9,
 	I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
 	I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+	I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+	I40E_PHY_TYPE_40GBASE_AOC = 0xD,
 	I40E_PHY_TYPE_100BASE_TX = 0x11,
 	I40E_PHY_TYPE_1000BASE_T = 0x12,
 	I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1539,7 +1551,10 @@ enum i40e_aq_phy_type {
 	I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
 	I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
 	I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
-	I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+	I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+	I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+	I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+	I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
 	I40E_PHY_TYPE_MAX
 };
@@ -1683,6 +1698,7 @@ struct i40e_aqc_get_link_status {
 #define I40E_AQ_LINK_TX_ACTIVE 0x00
 #define I40E_AQ_LINK_TX_DRAINED 0x01
 #define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
 	u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
 	__le16 max_frame_size;
 	u8 config;
@@ -1734,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
-	u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+	u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
 	u8 reserved[15];
 };
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
 enum i40e_aq_phy_reg_type {
 	I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1766,6 +1789,47 @@ struct i40e_aqc_nvm_update {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+	__le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+	__le16 element_count;
+	__le16 element_id; /* Feature/field ID */
+	u8 reserved[2];
+	__le32 address_high;
+	__le32 address_low;
+};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+	__le16 cmd_flags;
+	__le16 element_count;
+	u8 reserved[4];
+	__le32 address_high;
+	__le32 address_low;
+};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+struct i40e_aqc_nvm_config_data_feature {
+	__le16 feature_id;
+	__le16 instance_id;
+	__le16 feature_options;
+	__le16 feature_selection;
+};
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+	__le16 field_id;
+	__le16 instance_id;
+	__le16 field_options;
+	__le16 field_value;
+};
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
...
@@ -663,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
 	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
 	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
 	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
-	I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
 	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
 	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
 	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -1024,6 +1023,9 @@ struct i40e_hw_port_stats {
 	u64 tx_size_big; /* ptc9522 */
 	u64 mac_short_packet_dropped; /* mspdc */
 	u64 checksum_error; /* xec */
+	/* flow director stats */
+	u64 fd_atr_match;
+	u64 fd_sb_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
...
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.31"
+#define DRV_VERSION "0.9.34"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
 	"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -772,7 +772,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 			return NULL;
 	}
-	memcpy(f->macaddr, macaddr, ETH_ALEN);
+	ether_addr_copy(f->macaddr, macaddr);
 	list_add(&f->list, &adapter->mac_filter_list);
 	f->add = true;
@@ -805,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
 	f = i40evf_add_filter(adapter, addr->sa_data);
 	if (f) {
-		memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
-		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
-		       netdev->addr_len);
+		ether_addr_copy(hw->mac.addr, addr->sa_data);
+		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
 	}
 	return (f == NULL) ? -ENOMEM : 0;
@@ -968,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	struct i40evf_mac_filter *f;
+	if (adapter->state == __I40EVF_DOWN)
+		return;
 	/* remove all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		f->remove = true;
@@ -1588,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
 	struct i40e_arq_event_info event;
 	struct i40e_virtchnl_msg *v_msg;
 	i40e_status ret;
+	u32 val, oldval;
 	u16 pending;
 	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1615,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
 		}
 	} while (pending);
+	/* check for error indications */
+	val = rd32(hw, hw->aq.arq.len);
+	oldval = val;
+	if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+		val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+	}
+	if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+		val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+	}
+	if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+		val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(hw, hw->aq.arq.len, val);
+	val = rd32(hw, hw->aq.asq.len);
+	oldval = val;
+	if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+		val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+	}
+	if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+		val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+	}
+	if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+		val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+	}
+	if (oldval != val)
+		wr32(hw, hw->aq.asq.len, val);
 	/* re-enable Admin queue interrupt cause */
 	i40evf_misc_irq_enable(adapter);
@@ -1785,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
 	if (adapter->state <= __I40EVF_DOWN)
 		return 0;
+	/* signal that we are down to the interrupt handler */
+	adapter->state = __I40EVF_DOWN;
 	set_bit(__I40E_DOWN, &adapter->vsi.state);
 	i40evf_down(adapter);
-	adapter->state = __I40EVF_DOWN;
 	i40evf_free_traffic_irqs(adapter);
 	i40evf_free_all_tx_resources(adapter);
@@ -2057,8 +2094,8 @@ static void i40evf_init_task(struct work_struct *work)
 			adapter->hw.mac.addr);
 		random_ether_addr(adapter->hw.mac.addr);
 	}
-	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
-	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+	ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+	ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 	INIT_LIST_HEAD(&adapter->mac_filter_list);
 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2066,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
 	if (NULL == f)
 		goto err_sw_init;
-	memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+	ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
 	f->add = true;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
...
@@ -233,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 		vqpi->txq.queue_id = i;
 		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
 		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+		vqpi->txq.headwb_enabled = 1;
+		vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
+			(vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
 		vqpi->rxq.vsi_id = vqci->vsi_id;
 		vqpi->rxq.queue_id = i;
@@ -404,7 +407,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	veal->num_elements = count;
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		if (f->add) {
-			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+			ether_addr_copy(veal->list[i].addr, f->macaddr);
 			i++;
 			f->add = false;
 		}
@@ -465,7 +468,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 	veal->num_elements = count;
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
 		if (f->remove) {
-			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+			ether_addr_copy(veal->list[i].addr, f->macaddr);
 			i++;
 			list_del(&f->list);
 			kfree(f);
...
...@@ -155,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) ...@@ -155,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
ret_val = igb_check_for_link_82575(hw); ret_val = igb_check_for_link_82575(hw);
} }
return E1000_SUCCESS; return 0;
} }
/** /**
...@@ -1004,7 +1004,6 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) ...@@ -1004,7 +1004,6 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{ {
struct e1000_phy_info *phy = &hw->phy; struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u16 data; u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT); data = rd32(E1000_82580_PHY_POWER_MGMT);
...@@ -1028,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) ...@@ -1028,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
data &= ~E1000_82580_PM_SPD; } data &= ~E1000_82580_PM_SPD; }
wr32(E1000_82580_PHY_POWER_MGMT, data); wr32(E1000_82580_PHY_POWER_MGMT, data);
return ret_val; return 0;
} }
/** /**
...@@ -1048,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) ...@@ -1048,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{ {
struct e1000_phy_info *phy = &hw->phy; struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u16 data; u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT); data = rd32(E1000_82580_PHY_POWER_MGMT);
...@@ -1073,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) ...@@ -1073,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
} }
wr32(E1000_82580_PHY_POWER_MGMT, data); wr32(E1000_82580_PHY_POWER_MGMT, data);
return ret_val; return 0;
} }
/** /**
...@@ -1199,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) ...@@ -1199,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{ {
s32 timeout = PHY_CFG_TIMEOUT; s32 timeout = PHY_CFG_TIMEOUT;
s32 ret_val = 0;
u32 mask = E1000_NVM_CFG_DONE_PORT_0; u32 mask = E1000_NVM_CFG_DONE_PORT_0;
if (hw->bus.func == 1) if (hw->bus.func == 1)
...@@ -1223,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) ...@@ -1223,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_igp_3)) (hw->phy.type == e1000_phy_igp_3))
igb_phy_init_script_igp3(hw); igb_phy_init_script_igp3(hw);
return ret_val; return 0;
} }
/** /**
...@@ -1617,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) ...@@ -1617,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{ {
u32 ctrl_ext, ctrl_reg, reg, anadv_reg; u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg; bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS; s32 ret_val = 0;
u16 data; u16 data;
if ((hw->phy.media_type != e1000_media_type_internal_serdes) && if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
...@@ -2518,7 +2515,7 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) ...@@ -2518,7 +2515,7 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
u16 *data, bool read) u16 *data, bool read)
{ {
s32 ret_val = E1000_SUCCESS; s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
if (ret_val) if (ret_val)
...@@ -2552,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) ...@@ -2552,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
**/ **/
s32 igb_set_eee_i350(struct e1000_hw *hw) s32 igb_set_eee_i350(struct e1000_hw *hw)
{ {
s32 ret_val = 0;
u32 ipcnfg, eeer; u32 ipcnfg, eeer;
if ((hw->mac.type < e1000_i350) || if ((hw->mac.type < e1000_i350) ||
...@@ -2586,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) ...@@ -2586,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
rd32(E1000_EEER); rd32(E1000_EEER);
out: out:
return ret_val; return 0;
} }
/** /**
...@@ -2713,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = { ...@@ -2713,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
**/ **/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{ {
s32 status = E1000_SUCCESS;
u16 ets_offset; u16 ets_offset;
u16 ets_cfg; u16 ets_cfg;
u16 ets_sensor; u16 ets_sensor;
...@@ -2731,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) ...@@ -2731,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */ /* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
return status; return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
...@@ -2755,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) ...@@ -2755,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
E1000_I2C_THERMAL_SENSOR_ADDR, E1000_I2C_THERMAL_SENSOR_ADDR,
&data->sensor[i].temp); &data->sensor[i].temp);
} }
return status; return 0;
} }
/** /**
...@@ -2767,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) ...@@ -2767,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
**/ **/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{ {
s32 status = E1000_SUCCESS;
u16 ets_offset; u16 ets_offset;
u16 ets_cfg; u16 ets_cfg;
u16 ets_sensor; u16 ets_sensor;
...@@ -2793,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) ...@@ -2793,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */ /* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
return status; return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
...@@ -2824,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) ...@@ -2824,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
low_thresh_delta; low_thresh_delta;
} }
} }
return status; return 0;
} }
#endif #endif
......
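All of the hunks above follow the same cleanup: when a function's return value is invariant, the local ret_val/status variable is dropped and 0 is returned directly. A minimal sketch of the resulting shape (the function and the LPLU bit name are hypothetical, not taken from the driver) shows that callers testing for a non-zero result are unaffected:

/* Invariant-return style after the cleanup: no status local is needed. */
static s32 example_set_d0_lplu(struct e1000_hw *hw, bool active)
{
        u16 data = rd32(E1000_82580_PHY_POWER_MGMT);

        if (active)
                data |= E1000_82580_PM_D0_LPLU;   /* assumed bit name */
        else
                data &= ~E1000_82580_PM_D0_LPLU;

        wr32(E1000_82580_PHY_POWER_MGMT, data);
        return 0;                                 /* always succeeds */
}

/* Caller side is unchanged: a non-zero return still signals failure. */
static void example_caller(struct e1000_hw *hw)
{
        if (example_set_d0_lplu(hw, true))
                hw_dbg("LPLU configuration failed\n");
}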
...@@ -459,7 +459,6 @@ ...@@ -459,7 +459,6 @@
#define E1000_RAH_POOL_1 0x00040000 #define E1000_RAH_POOL_1 0x00040000
/* Error Codes */ /* Error Codes */
#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1 #define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2 #define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3 #define E1000_ERR_CONFIG 3
......
...@@ -2738,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev, ...@@ -2738,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 status = E1000_SUCCESS; u32 status = 0;
u16 sff8472_rev, addr_mode; u16 sff8472_rev, addr_mode;
bool page_swap = false; bool page_swap = false;
...@@ -2748,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev, ...@@ -2748,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
/* Check whether we support SFF-8472 or not */ /* Check whether we support SFF-8472 or not */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
if (status != E1000_SUCCESS) if (status)
return -EIO; return -EIO;
/* addressing mode is not supported */ /* addressing mode is not supported */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
if (status != E1000_SUCCESS) if (status)
return -EIO; return -EIO;
/* addressing mode is not supported */ /* addressing mode is not supported */
...@@ -2780,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev, ...@@ -2780,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
{ {
struct igb_adapter *adapter = netdev_priv(netdev); struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 status = E1000_SUCCESS; u32 status = 0;
u16 *dataword; u16 *dataword;
u16 first_word, last_word; u16 first_word, last_word;
int i = 0; int i = 0;
...@@ -2799,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev, ...@@ -2799,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) { for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
if (status != E1000_SUCCESS) { if (status) {
/* Error occurred while reading module */ /* Error occurred while reading module */
kfree(dataword); kfree(dataword);
return -EIO; return -EIO;
......
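With E1000_SUCCESS gone, the SFF-8472 module paths above simply treat any non-zero status as a failure and map it to -EIO. These handlers are the driver side of the standard ethtool module-EEPROM query; a user-space sketch of issuing that query follows (the interface name "eth0" is an assumption, not part of this patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed interface */
        ifr.ifr_data = (char *)&modinfo;

        /* Routed to the driver's get_module_info (igb_get_module_info). */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("module EEPROM: type %u, %u bytes\n",
                       modinfo.type, modinfo.eeprom_len);

        close(fd);
        return 0;
}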
...@@ -2199,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter) ...@@ -2199,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/ **/
static s32 igb_init_i2c(struct igb_adapter *adapter) static s32 igb_init_i2c(struct igb_adapter *adapter)
{ {
s32 status = E1000_SUCCESS; s32 status = 0;
/* I2C interface supported on i350 devices */ /* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350) if (adapter->hw.mac.type != e1000_i350)
return E1000_SUCCESS; return 0;
/* Initialize the i2c bus which is controlled by the registers. /* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements * This bus will use the i2c_algo_bit structure that implements
...@@ -7935,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, ...@@ -7935,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
wr32(reg_offset, reg_val); wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting; adapter->vf_data[vf].spoofchk_enabled = setting;
return E1000_SUCCESS; return 0;
} }
static int igb_ndo_get_vf_config(struct net_device *netdev, static int igb_ndo_get_vf_config(struct net_device *netdev,
...@@ -8097,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, ...@@ -8097,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
swfw_mask = E1000_SWFW_PHY0_SM; swfw_mask = E1000_SWFW_PHY0_SM;
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
!= E1000_SUCCESS)
return E1000_ERR_SWFW_SYNC; return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_read_byte_data(this_client, byte_offset); status = i2c_smbus_read_byte_data(this_client, byte_offset);
...@@ -8108,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, ...@@ -8108,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_ERR_I2C; return E1000_ERR_I2C;
else { else {
*data = status; *data = status;
return E1000_SUCCESS; return 0;
} }
} }
...@@ -8133,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, ...@@ -8133,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (!this_client) if (!this_client)
return E1000_ERR_I2C; return E1000_ERR_I2C;
if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC; return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_write_byte_data(this_client, byte_offset, data); status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
hw->mac.ops.release_swfw_sync(hw, swfw_mask); hw->mac.ops.release_swfw_sync(hw, swfw_mask);
...@@ -8141,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, ...@@ -8141,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (status) if (status)
return E1000_ERR_I2C; return E1000_ERR_I2C;
else else
return E1000_SUCCESS; return 0;
} }
......
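The same convention simplifies the semaphore check in the I2C byte accessors: acquire_swfw_sync() now reads as a plain truth test. Below is a sketch of the acquire/transfer/release shape, not the driver's actual helper; the name is hypothetical and the i2c client is passed in explicitly rather than looked up from the adapter as the driver does:

/* Sketch only: assumes mac ops return 0 on success and a positive
 * E1000_ERR_* code otherwise.
 */
static s32 example_i2c_read_byte(struct e1000_hw *hw,
                                 struct i2c_client *client,
                                 u8 byte_offset, u8 *data)
{
        u16 swfw_mask = E1000_SWFW_PHY0_SM;
        s32 status;

        /* 0 means the semaphore was acquired; anything else is failure. */
        if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
                return E1000_ERR_SWFW_SYNC;

        status = i2c_smbus_read_byte_data(client, byte_offset);
        hw->mac.ops.release_swfw_sync(hw, swfw_mask);

        /* i2c_smbus_read_byte_data() returns a negative errno on failure,
         * otherwise the byte that was read.
         */
        if (status < 0)
                return E1000_ERR_I2C;

        *data = status;
        return 0;
}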
...@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) ...@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0; -EFAULT : 0;
} }
/** /**
* igb_ptp_set_ts_config - control hardware time stamping * igb_ptp_set_timestamp_mode - setup hardware for timestamping
* @netdev: * @adapter: networking device structure
* @ifreq: * @config: hwtstamp configuration
* *
* Outgoing time stamping can be enabled and disabled. Play nice and * Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead * disable it when requested, although it shouldn't cause any overhead
...@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) ...@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* type has to be specified. Matching the kind of event packet is * type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of * not supported, with the exception of "all V2 events regardless of
* level 2 or 4". * level 2 or 4".
**/ */
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
struct hwtstamp_config *config)
{ {
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
struct hwtstamp_config *config = &adapter->tstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0; u32 tsync_rx_cfg = 0;
...@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) ...@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
bool is_l2 = false; bool is_l2 = false;
u32 regval; u32 regval;
if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
return -EFAULT;
/* reserved for future extensions */ /* reserved for future extensions */
if (config->flags) if (config->flags)
return -EINVAL; return -EINVAL;
...@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) ...@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
regval = rd32(E1000_RXSTMPL); regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH); regval = rd32(E1000_RXSTMPH);
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? return 0;
}
/**
* igb_ptp_set_ts_config - set hardware time stamping config
* @netdev: network interface device structure
* @ifr: interface request carrying the user's hwtstamp_config
*
* Copy the requested hwtstamp_config from user space, apply it through
* igb_ptp_set_timestamp_mode(), save it for later retrieval and reset,
* and return the (possibly adjusted) config back to user space.
**/
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
int err;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
err = igb_ptp_set_timestamp_mode(adapter, &config);
if (err)
return err;
/* save these settings for future reference */
memcpy(&adapter->tstamp_config, &config,
sizeof(adapter->tstamp_config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0; -EFAULT : 0;
} }
...@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter) ...@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
wr32(E1000_IMS, E1000_IMS_TS); wr32(E1000_IMS, E1000_IMS_TS);
} }
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev); &adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) { if (IS_ERR(adapter->ptp_clock)) {
...@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter) ...@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
return; return;
/* reset the tstamp_config */ /* reset the tstamp_config */
memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config)); igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
switch (adapter->hw.mac.type) { switch (adapter->hw.mac.type) {
case e1000_82576: case e1000_82576:
......
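The split above lets igb_ptp_reset() replay the saved tstamp_config through igb_ptp_set_timestamp_mode() instead of zeroing it, so the device returns to its last requested timestamp mode after a reset. That mode is still requested from user space through the standard SIOCSHWTSTAMP ioctl, which lands in igb_ptp_set_ts_config(); a minimal sketch (the interface name "eth0" is assumed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed interface */
        ifr.ifr_data = (char *)&cfg;

        /* Lands in igb_ptp_set_ts_config(); the driver may widen rx_filter
         * (e.g. to "all V2 events") and copies the actual mode back.
         */
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
                printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

        close(fd);
        return 0;
}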