Commit 0c32ec8f authored by David S. Miller

Merge branch 'bnx2x-next'

Yuval Mintz says:

====================
bnx2x: Start utilizing 7.10.51

This series will enable bnx2x to start utilizing its 7.10.51 FW.
In addition, it adds hardware timestamping (PTP) support, as well as a couple
of routine semantic cleanups.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c0b80236 97539f1e
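Background for the timestamping part of the series: with the PTP_SUPPORTED and TX_TIMESTAMPING_EN flags and the bnx2x_get_ts_info() ethtool hook in place (all visible in the diff below), user space enables hardware timestamps through the standard SIOCSHWTSTAMP ioctl. The snippet below is an illustrative sketch only, not part of this series; the interface name "eth0" is an assumption and error handling is minimal.

/* sketch: enable HW TX timestamps and PTPv2 RX filtering on "eth0"
 * using the generic SIOCSHWTSTAMP interface from linux/net_tstamp.h.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
    struct hwtstamp_config cfg;
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    memset(&cfg, 0, sizeof(cfg));
    cfg.tx_type = HWTSTAMP_TX_ON;                 /* timestamp transmitted packets */
    cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* one of the filters get_ts_info advertises */

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed interface name */
    ifr.ifr_data = (char *)&cfg;

    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
        perror("SIOCSHWTSTAMP");
        close(fd);
        return 1;
    }

    /* the kernel writes back the settings it actually applied */
    printf("hw timestamping: tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
    close(fd);
    return 0;
}

Running "ethtool -T eth0" against a device driven by this series should report the same tx_types and rx_filters that bnx2x_get_ts_info() fills in below.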
@@ -20,13 +20,17 @@
 #include <linux/types.h>
 #include <linux/pci_regs.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>

 /* compilation time flags */

 /* define this to make the driver freeze on error to allow getting debug info
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */

-#define DRV_MODULE_VERSION      "1.78.19-0"
+#define DRV_MODULE_VERSION      "1.710.51-0"
 #define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
@@ -70,6 +74,7 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_SP            0x0100000 /* was: NETIF_MSG_INTR */
 #define BNX2X_MSG_FP            0x0200000 /* was: NETIF_MSG_INTR */
 #define BNX2X_MSG_IOV           0x0800000
+#define BNX2X_MSG_PTP           0x1000000
 #define BNX2X_MSG_IDLE          0x2000000 /* used for idle check*/
 #define BNX2X_MSG_ETHTOOL       0x4000000
 #define BNX2X_MSG_DCB           0x8000000
@@ -1587,10 +1592,11 @@ struct bnx2x {
 #define USING_SINGLE_MSIX_FLAG          (1 << 20)
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF    (1 << 21)
 #define IS_VF_FLAG                      (1 << 22)
-#define INTERRUPTS_ENABLED_FLAG         (1 << 23)
-#define BC_SUPPORTS_RMMOD_CMD           (1 << 24)
-#define HAS_PHYS_PORT_ID                (1 << 25)
-#define AER_ENABLED                     (1 << 26)
+#define BC_SUPPORTS_RMMOD_CMD           (1 << 23)
+#define HAS_PHYS_PORT_ID                (1 << 24)
+#define AER_ENABLED                     (1 << 25)
+#define PTP_SUPPORTED                   (1 << 26)
+#define TX_TIMESTAMPING_EN              (1 << 27)

 #define BP_NOMCP(bp)                    ((bp)->flags & NO_MCP_FLAG)
@@ -1684,13 +1690,9 @@ struct bnx2x {
 #define BNX2X_STATE_ERROR               0xf000

 #define BNX2X_MAX_PRIORITY              8
-#define BNX2X_MAX_ENTRIES_PER_PRI       16
-#define BNX2X_MAX_COS                   3
-#define BNX2X_MAX_TX_COS                2
     int num_queues;
     uint num_ethernet_queues;
     uint num_cnic_queues;
-    int num_napi_queues;
     int disable_tpa;

     u32 rx_mode;
@@ -1933,6 +1935,19 @@ struct bnx2x {

     u8 phys_port_id[ETH_ALEN];

+    /* PTP related context */
+    struct ptp_clock *ptp_clock;
+    struct ptp_clock_info ptp_clock_info;
+    struct work_struct ptp_task;
+    struct cyclecounter cyclecounter;
+    struct timecounter timecounter;
+    bool timecounter_init_done;
+    struct sk_buff *ptp_tx_skb;
+    unsigned long ptp_tx_start;
+    bool hwtstamp_ioctl_called;
+    u16 tx_type;
+    u16 rx_filter;
+
     struct bnx2x_link_report_data vf_link_vars;
 };
@@ -2559,4 +2574,11 @@ void bnx2x_update_mng_version(struct bnx2x *bp);
 #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))

+void bnx2x_init_ptp(struct bnx2x *bp);
+int bnx2x_configure_ptp_filters(struct bnx2x *bp);
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+
+#define BNX2X_MAX_PHC_DRIFT 31000000
+#define BNX2X_PTP_TX_TIMEOUT
+
 #endif /* bnx2x.h */
@@ -1067,6 +1067,11 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)

         skb_record_rx_queue(skb, fp->rx_queue);

+        /* Check if this packet was timestamped */
+        if (unlikely(le16_to_cpu(cqe->fast_path_cqe.type_error_flags) &
+                     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
+            bnx2x_set_rx_ts(bp, skb);
+
         if (le16_to_cpu(cqe_fp->pars_flags.flags) &
             PARSING_FLAGS_VLAN)
             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -2082,6 +2087,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
             __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
         if (rss_obj->udp_rss_v6)
             __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+
+        if (!CHIP_IS_E1x(bp))
+            /* valid only for TUNN_MODE_GRE tunnel mode */
+            __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
     } else {
         __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
     }
@@ -2804,7 +2813,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
     /* Initialize Rx filter. */
     bnx2x_set_rx_mode_inner(bp);

-    /* Start the Tx */
+    if (bp->flags & PTP_SUPPORTED) {
+        bnx2x_init_ptp(bp);
+        bnx2x_configure_ptp_filters(bp);
+    }
+    /* Start Tx */
     switch (load_mode) {
     case LOAD_NORMAL:
         /* Tx queue should be only re-enabled */
@@ -3441,26 +3454,6 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
 }
 #endif

-static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
-                                 u32 xmit_type)
-{
-    struct ipv6hdr *ipv6;
-
-    *parsing_data |= (skb_shinfo(skb)->gso_size <<
-                      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
-                      ETH_TX_PARSE_BD_E2_LSO_MSS;
-
-    if (xmit_type & XMIT_GSO_ENC_V6)
-        ipv6 = inner_ipv6_hdr(skb);
-    else if (xmit_type & XMIT_GSO_V6)
-        ipv6 = ipv6_hdr(skb);
-    else
-        ipv6 = NULL;
-
-    if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
-        *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
-}
-
 /**
  * bnx2x_set_pbd_gso - update PBD in GSO case.
  *
@@ -3470,7 +3463,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
  */
 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
                               struct eth_tx_parse_bd_e1x *pbd,
-                              struct eth_tx_start_bd *tx_start_bd,
                               u32 xmit_type)
 {
     pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3483,9 +3475,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
             bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                        ip_hdr(skb)->daddr,
                                        0, IPPROTO_TCP, 0));
-
-        /* GSO on 57710/57711 needs FW to calculate IP checksum */
-        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
     } else {
         pbd->tcp_pseudo_csum =
             bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -3657,18 +3646,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
             (__force u32)iph->tot_len -
             (__force u32)iph->frag_off;

+        outerip_len = iph->ihl << 1;
+
         pbd2->fw_ip_csum_wo_len_flags_frag =
             bswab16(csum_fold((__force __wsum)csum));
     } else {
         pbd2->fw_ip_hdr_to_payload_w =
             hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+        pbd_e2->data.tunnel_data.flags |=
+            1 /*IPv6*/ << ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
     }

     pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);

     pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));

-    if (xmit_type & XMIT_GSO_V4) {
+    /* inner IP header info */
+    if (xmit_type & XMIT_CSUM_ENC_V4) {
         pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);

         pbd_e2->data.tunnel_data.pseudo_csum =
@@ -3676,8 +3670,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
                 inner_ip_hdr(skb)->saddr,
                 inner_ip_hdr(skb)->daddr,
                 0, IPPROTO_TCP, 0));
-
-        outerip_len = ip_hdr(skb)->ihl << 1;
     } else {
         pbd_e2->data.tunnel_data.pseudo_csum =
             bswab16(~csum_ipv6_magic(
@@ -3690,8 +3682,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
     *global_data |=
         outerip_off |
-        (!!(xmit_type & XMIT_CSUM_V6) <<
-            ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
         (outerip_len <<
             ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
         ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
@@ -3703,6 +3693,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
     }
 }

+static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
+                                          u32 xmit_type)
+{
+    struct ipv6hdr *ipv6;
+
+    if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
+        return;
+
+    if (xmit_type & XMIT_GSO_ENC_V6)
+        ipv6 = inner_ipv6_hdr(skb);
+    else /* XMIT_GSO_V6 */
+        ipv6 = ipv6_hdr(skb);
+
+    if (ipv6->nexthdr == NEXTHDR_IPV6)
+        *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -3835,6 +3842,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)

     tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;

+    if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+        if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+            BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+        } else if (bp->ptp_tx_skb) {
+            BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+        } else {
+            skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+            /* schedule check for Tx timestamp */
+            bp->ptp_tx_skb = skb_get(skb);
+            bp->ptp_tx_start = jiffies;
+            schedule_work(&bp->ptp_task);
+        }
+    }
+
     /* header nbd: indirectly zero other flags! */
     tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
@@ -3919,6 +3940,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                      xmit_type);
     }

+    bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
     /* Add the macs to the parsing BD if this is a vf or if
      * Tx Switching is enabled.
      */
@@ -3984,10 +4006,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                 bd_prod);
         }

         if (!CHIP_IS_E1x(bp))
-            bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
-                                 xmit_type);
+            pbd_e2_parsing_data |=
+                (skb_shinfo(skb)->gso_size <<
+                 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+                 ETH_TX_PARSE_BD_E2_LSO_MSS;
         else
-            bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
+            bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
     }

     /* Set the PBD's parsing_data field if not zero
......
@@ -932,8 +932,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
     else /* CHIP_IS_E1X */
         start_params->network_cos_mode = FW_WRR;

-    start_params->gre_tunnel_mode = L2GRE_TUNNEL;
-    start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+    start_params->tunnel_mode = TUNN_MODE_GRE;
+    start_params->gre_tunnel_type = IPGRE_TUNNEL;
+    start_params->inner_gre_rss_en = 1;

     return bnx2x_func_state_change(bp, &func_params);
 }
......
@@ -2092,7 +2092,6 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
 static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
 {
     struct bnx2x *bp = netdev_priv(netdev);
-    int rc = 0;

     DP(BNX2X_MSG_DCB, "SET-ALL\n");
@@ -2110,9 +2109,7 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
                                  1);
         bnx2x_dcbx_init(bp, true);
     }
-    DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
-    if (rc)
-        return 1;
+    DP(BNX2X_MSG_DCB, "set_dcbx_params done\n");
     return 0;
 }
......
@@ -3481,6 +3481,46 @@ static int bnx2x_set_channels(struct net_device *dev,
     return bnx2x_nic_load(bp, LOAD_NORMAL);
 }

+static int bnx2x_get_ts_info(struct net_device *dev,
+                             struct ethtool_ts_info *info)
+{
+    struct bnx2x *bp = netdev_priv(dev);
+
+    if (bp->flags & PTP_SUPPORTED) {
+        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                SOF_TIMESTAMPING_RX_SOFTWARE |
+                                SOF_TIMESTAMPING_SOFTWARE |
+                                SOF_TIMESTAMPING_TX_HARDWARE |
+                                SOF_TIMESTAMPING_RX_HARDWARE |
+                                SOF_TIMESTAMPING_RAW_HARDWARE;
+
+        if (bp->ptp_clock)
+            info->phc_index = ptp_clock_index(bp->ptp_clock);
+        else
+            info->phc_index = -1;
+
+        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                           (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                           (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+        info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
+
+        return 0;
+    }
+
+    return ethtool_op_get_ts_info(dev, info);
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
     .get_settings = bnx2x_get_settings,
     .set_settings = bnx2x_set_settings,
@@ -3522,7 +3562,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
     .get_module_eeprom = bnx2x_get_module_eeprom,
     .get_eee = bnx2x_get_eee,
     .set_eee = bnx2x_set_eee,
-    .get_ts_info = ethtool_op_get_ts_info,
+    .get_ts_info = bnx2x_get_ts_info,
 };

 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
......
@@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
     struct bnx2x_raw_obj *r = &o->raw;
     struct eth_rss_update_ramrod_data *data =
         (struct eth_rss_update_ramrod_data *)(r->rdata);
+    u16 caps = 0;
     u8 rss_mode = 0;
     int rc;
@@ -4042,28 +4043,27 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
     /* RSS capabilities */
     if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
-        data->capabilities |=
-            ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

     if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
-        data->capabilities |=
-            ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

     if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
-        data->capabilities |=
-            ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

     if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
-        data->capabilities |=
-            ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

     if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
-        data->capabilities |=
-            ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

     if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
-        data->capabilities |=
-            ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
+    if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
+        caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+
+    data->capabilities = cpu_to_le16(caps);

     /* Hashing mask */
     data->rss_result_mask = p->rss_result_mask;
@@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
         test_bit(BNX2X_Q_FLG_FCOE, flags) ?
         LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

+    gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
+
     DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
        gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
 }
@@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
         test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
     tx_data->force_default_pri_flg =
         test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+    tx_data->refuse_outband_vlan_flg =
+        test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
     tx_data->tunnel_lso_inc_ip_id =
         test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
     tx_data->tunnel_non_lso_pcsum_location =
-        test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
-                                                    PCSUM_ON_BD;
+        test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
+                                                    CSUM_ON_BD;

     tx_data->tx_status_block_id = params->fw_sb_id;
     tx_data->tx_sb_index_number = params->sb_cq_index;
@@ -4722,6 +4725,12 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
     data->tx_switching_change_flg =
         test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
                  &params->update_flags);
+
+    /* PTP */
+    data->handle_ptp_pkts_flg =
+        test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
+    data->handle_ptp_pkts_change_flg =
+        test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
 }

 static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -5376,6 +5385,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
              (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
             next_state = BNX2X_F_STATE_STARTED;

+        else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+                 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+            next_state = BNX2X_F_STATE_STARTED;
+
         else if (cmd == BNX2X_F_CMD_TX_STOP)
             next_state = BNX2X_F_STATE_TX_STOPPED;
@@ -5385,6 +5398,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
              (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
             next_state = BNX2X_F_STATE_TX_STOPPED;

+        else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
+                 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+            next_state = BNX2X_F_STATE_TX_STOPPED;
+
         else if (cmd == BNX2X_F_CMD_TX_START)
             next_state = BNX2X_F_STATE_STARTED;
@@ -5652,8 +5669,11 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
     rdata->sd_vlan_tag   = cpu_to_le16(start_params->sd_vlan_tag);
     rdata->path_id       = BP_PATH(bp);
     rdata->network_cos_mode = start_params->network_cos_mode;
-    rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
-    rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+    rdata->tunnel_mode = start_params->tunnel_mode;
+    rdata->gre_tunnel_type = start_params->gre_tunnel_type;
+    rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
+    rdata->vxlan_dst_port = cpu_to_le16(4789);
+    rdata->sd_vlan_eth_type = cpu_to_le16(0x8100);

     /* No need for an explicit memory barrier here as long we would
      * need to ensure the ordering of writing to the SPQ element
@@ -5680,8 +5700,28 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
     memset(rdata, 0, sizeof(*rdata));

     /* Fill the ramrod data with provided parameters */
-    rdata->tx_switch_suspend_change_flg = 1;
-    rdata->tx_switch_suspend = switch_update_params->suspend;
+    if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+                 &switch_update_params->changes)) {
+        rdata->tx_switch_suspend_change_flg = 1;
+        rdata->tx_switch_suspend =
+            test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+                     &switch_update_params->changes);
+    }
+
+    if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+                 &switch_update_params->changes)) {
+        rdata->update_tunn_cfg_flg = 1;
+        if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+                     &switch_update_params->changes))
+            rdata->tunn_clss_en = 1;
+        if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+                     &switch_update_params->changes))
+            rdata->inner_gre_rss_en = 1;
+        rdata->tunnel_mode = switch_update_params->tunnel_mode;
+        rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
+        rdata->vxlan_dst_port = cpu_to_le16(4789);
+    }
+
     rdata->echo = SWITCH_UPDATE;

     /* No need for an explicit memory barrier here as long as we
@@ -5817,6 +5857,40 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
                          U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }

+static inline
+int bnx2x_func_send_set_timesync(struct bnx2x *bp,
+                                 struct bnx2x_func_state_params *params)
+{
+    struct bnx2x_func_sp_obj *o = params->f_obj;
+    struct set_timesync_ramrod_data *rdata =
+        (struct set_timesync_ramrod_data *)o->rdata;
+    dma_addr_t data_mapping = o->rdata_mapping;
+    struct bnx2x_func_set_timesync_params *set_timesync_params =
+        &params->params.set_timesync;
+
+    memset(rdata, 0, sizeof(*rdata));
+
+    /* Fill the ramrod data with provided parameters */
+    rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
+    rdata->offset_cmd = set_timesync_params->offset_cmd;
+    rdata->add_sub_drift_adjust_value =
+        set_timesync_params->add_sub_drift_adjust_value;
+    rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
+    rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
+    rdata->offset_delta.lo = U64_LO(set_timesync_params->offset_delta);
+    rdata->offset_delta.hi = U64_HI(set_timesync_params->offset_delta);
+
+    DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
+       rdata->drift_adjust_cmd, rdata->offset_cmd,
+       rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
+       rdata->drift_adjust_period, rdata->offset_delta.lo,
+       rdata->offset_delta.hi);
+
+    return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
+                         U64_HI(data_mapping),
+                         U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
 static int bnx2x_func_send_cmd(struct bnx2x *bp,
                                struct bnx2x_func_state_params *params)
 {
@@ -5839,6 +5913,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
         return bnx2x_func_send_tx_start(bp, params);
     case BNX2X_F_CMD_SWITCH_UPDATE:
         return bnx2x_func_send_switch_update(bp, params);
+    case BNX2X_F_CMD_SET_TIMESYNC:
+        return bnx2x_func_send_set_timesync(bp, params);
     default:
         BNX2X_ERR("Unknown command: %d\n", params->cmd);
         return -EINVAL;
......
@@ -711,6 +711,7 @@ enum {
     BNX2X_RSS_IPV6,
     BNX2X_RSS_IPV6_TCP,
     BNX2X_RSS_IPV6_UDP,
+    BNX2X_RSS_GRE_INNER_HDRS,
 };

 struct bnx2x_config_rss_params {
@@ -769,7 +770,9 @@ enum {
     BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
     BNX2X_Q_UPDATE_SILENT_VLAN_REM,
     BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
-    BNX2X_Q_UPDATE_TX_SWITCHING
+    BNX2X_Q_UPDATE_TX_SWITCHING,
+    BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+    BNX2X_Q_UPDATE_PTP_PKTS,
 };

 /* Allowed Queue states */
@@ -831,6 +834,7 @@ enum {
     BNX2X_Q_FLG_ANTI_SPOOF,
     BNX2X_Q_FLG_SILENT_VLAN_REM,
     BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+    BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
     BNX2X_Q_FLG_PCSUM_ON_PKT,
     BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
 };
@@ -851,6 +855,10 @@ enum bnx2x_q_type {
 #define BNX2X_MULTI_TX_COS      3 /* Maximum possible */
 #define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
+ * timesync-related ramrods must not use this DMAE command ID.
+ */
+#define FW_DMAE_CMD_ID 6

 struct bnx2x_queue_init_params {
     struct {
@@ -1085,6 +1093,16 @@ struct bnx2x_queue_sp_obj {
 };

 /********************** Function state update *********************************/
+
+/* UPDATE command options */
+enum {
+    BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+    BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+    BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+    BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+    BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+};
+
 /* Allowed Function states */
 enum bnx2x_func_state {
     BNX2X_F_STATE_RESET,
@@ -1105,6 +1123,7 @@ enum bnx2x_func_cmd {
     BNX2X_F_CMD_TX_STOP,
     BNX2X_F_CMD_TX_START,
     BNX2X_F_CMD_SWITCH_UPDATE,
+    BNX2X_F_CMD_SET_TIMESYNC,
     BNX2X_F_CMD_MAX,
 };
@@ -1146,18 +1165,25 @@ struct bnx2x_func_start_params {
     /* Function cos mode */
     u8 network_cos_mode;

-    /* NVGRE classification enablement */
-    u8 nvgre_clss_en;
+    /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
+    u8 tunnel_mode;

-    /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-    u8 gre_tunnel_mode;
+    /* tunneling classification enablement */
+    u8 tunn_clss_en;

-    /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
-    u8 gre_tunnel_rss;
+    /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+    u8 gre_tunnel_type;
+
+    /* Enables Inner GRE RSS on the function, depends on the client RSS
+     * capabilities
+     */
+    u8 inner_gre_rss_en;
 };

 struct bnx2x_func_switch_update_params {
-    u8 suspend;
+    unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
+    u8 tunnel_mode;
+    u8 gre_tunnel_type;
 };

 struct bnx2x_func_afex_update_params {
@@ -1172,6 +1198,7 @@ struct bnx2x_func_afex_viflists_params {
     u8 afex_vif_list_command;
     u8 func_to_clear;
 };
+
 struct bnx2x_func_tx_start_params {
     struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
     u8 dcb_enabled;
@@ -1179,6 +1206,24 @@ struct bnx2x_func_tx_start_params {
     u8 dont_add_pri_0_en;
 };

+struct bnx2x_func_set_timesync_params {
+    /* Reset, set or keep the current drift value */
+    u8 drift_adjust_cmd;
+
+    /* Dec, inc or keep the current offset */
+    u8 offset_cmd;
+
+    /* Drift value direction */
+    u8 add_sub_drift_adjust_value;
+
+    /* Drift, period and offset values to be used according to the commands
+     * above.
+     */
+    u8 drift_adjust_value;
+    u32 drift_adjust_period;
+    u64 offset_delta;
+};
+
 struct bnx2x_func_state_params {
     struct bnx2x_func_sp_obj *f_obj;
@@ -1197,6 +1242,7 @@ struct bnx2x_func_state_params {
         struct bnx2x_func_afex_update_params afex_update;
         struct bnx2x_func_afex_viflists_params afex_viflists;
         struct bnx2x_func_tx_start_params tx_start;
+        struct bnx2x_func_set_timesync_params set_timesync;
     } params;
 };
......
@@ -1125,7 +1125,7 @@ static int bnx2x_ari_enabled(struct pci_dev *dev)
     return dev->bus->self && dev->bus->self->ari_enabled;
 }

-static void
+static int
 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 {
     int sb_id;
@@ -1150,6 +1150,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
                GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
     }
     DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
+    return BP_VFDB(bp)->vf_sbs_pool;
 }

 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1314,15 +1315,17 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
     }

     /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
-    bnx2x_get_vf_igu_cam_info(bp);
+    if (!bnx2x_get_vf_igu_cam_info(bp)) {
+        BNX2X_ERR("No entries in IGU CAM for vfs\n");
+        err = -EINVAL;
+        goto failed;
+    }

     /* allocate the queue arrays for all VFs */
     bp->vfdb->vfqs = kzalloc(
         BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
         GFP_KERNEL);

-    DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
     if (!bp->vfdb->vfqs) {
         BNX2X_ERR("failed to allocate vf queue array\n");
         err = -ENOMEM;
@@ -1349,9 +1352,7 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
     if (!IS_SRIOV(bp))
         return;

-    DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
-    pci_disable_sriov(bp->pdev);
-    DP(BNX2X_MSG_IOV, "sriov disabled\n");
+    bnx2x_disable_sriov(bp);

     /* disable access to all VFs */
     for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
@@ -1985,21 +1986,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
     bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
 }

-static inline
-struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
-{
-    int i;
-    struct bnx2x_virtf *vf = NULL;
-
-    for_each_vf(bp, i) {
-        vf = BP_VF(bp, i);
-        if (stat_id >= vf->igu_base_id &&
-            stat_id < vf->igu_base_id + vf_sb_count(vf))
-            break;
-    }
-    return vf;
-}
-
 /* VF API helpers */
 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
                                 u8 enable)
@@ -2362,12 +2348,6 @@ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
     return rc;
 }

-static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
-                                     struct bnx2x_virtf *vf, u32 *sbdf)
-{
-    *sbdf = vf->devfn | (vf->bus << 8);
-}
-
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                               enum channel_tlvs tlv)
 {
@@ -2416,7 +2396,7 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
     /* log the unlock */
     DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
-       vf->abs_vfid, vf->op_current);
+       vf->abs_vfid, current_tlv);
 }

 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
@@ -2501,7 +2481,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
     bp->requested_nr_virtfn = num_vfs_param;
     if (num_vfs_param == 0) {
         bnx2x_set_pf_tx_switching(bp, false);
-        pci_disable_sriov(dev);
+        bnx2x_disable_sriov(bp);
         return 0;
     } else {
         return bnx2x_enable_sriov(bp);
@@ -2614,6 +2594,12 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)

 void bnx2x_disable_sriov(struct bnx2x *bp)
 {
+    if (pci_vfs_assigned(bp->pdev)) {
+        DP(BNX2X_MSG_IOV,
+           "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+        return;
+    }
+
     pci_disable_sriov(bp->pdev);
 }

@@ -2628,7 +2614,7 @@ static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
     }

     if (!IS_SRIOV(bp)) {
-        BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n");
+        BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
         return -EINVAL;
     }
......
@@ -299,7 +299,8 @@ struct bnx2x_vfdb {
 #define BP_VFDB(bp)             ((bp)->vfdb)
     /* vf array */
     struct bnx2x_virtf      *vfs;
-#define BP_VF(bp, idx)          (&((bp)->vfdb->vfs[idx]))
+#define BP_VF(bp, idx)          ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
+                                    &((bp)->vfdb->vfs[idx]) : NULL)
 #define bnx2x_vf(bp, idx, var)  ((bp)->vfdb->vfs[idx].var)

     /* queue array - for all vfs */
......
@@ -137,7 +137,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
             cpu_to_le16(bp->stats_counter++);

         DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
-           bp->fw_stats_req->hdr.drv_stats_counter);
+           le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));

         /* adjust the ramrod to include VF queues statistics */
         bnx2x_iov_adjust_stats_req(bp);
@@ -200,7 +200,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
     }
 }

-static int bnx2x_stats_comp(struct bnx2x *bp)
+static void bnx2x_stats_comp(struct bnx2x *bp)
 {
     u32 *stats_comp = bnx2x_sp(bp, stats_comp);
     int cnt = 10;
@@ -214,7 +214,6 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
         cnt--;
         usleep_range(1000, 2000);
     }
-    return 1;
 }

 /*
......
@@ -583,7 +583,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
         flags |= VFPF_QUEUE_FLG_STATS;
         flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
         flags |= VFPF_QUEUE_FLG_VLAN;
-        DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

         /* Common */
         req->vf_qid = fp_idx;
@@ -952,14 +951,6 @@ static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
     REG_WR8(bp, addr, 1);
 }

-static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
-{
-    int i;
-
-    for_each_vf(bp, i)
-        storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
-}
-
 /* enable vf_pf mailbox (aka vf-pf-channel) */
 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
 {
......