Commit e42780b6 authored by Dmitry Kravkov, committed by David S. Miller

bnx2x: Utilize FW 7.10.51

 - (L2) In some multi-function configurations, inter-PF and inter-VF
   Tx switching is incorrectly enabled.

 - (L2) Wrong assert code in FLR final cleanup in case it is sent not
   after FLR.

 - (L2) Chip may stall in very rare cases under heavy traffic with FW GRO
   enabled.

 - (L2) VF malicious notification error fixes.

 - (L2) Default the GRE tunnel type to IPGRE, which allows proper RSS for IPGRE
   packets; L2GRE traffic will reach a single queue.

 - (FCoE) Fix data being placed in wrong buffer when corrupt FCoE frame is
   received.

 - (FCoE) Burst of FIP packets with destination MAC of ALL-FCF_MACs
   causes FCoE traffic to stop.
Signed-off-by: Dmitry Kravkov <Dmitry.Kravkov@qlogic.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c0b80236
...@@ -2082,6 +2082,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, ...@@ -2082,6 +2082,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags); __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
if (rss_obj->udp_rss_v6) if (rss_obj->udp_rss_v6)
__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags); __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
if (!CHIP_IS_E1x(bp))
/* valid only for TUNN_MODE_GRE tunnel mode */
__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
} else { } else {
__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags); __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
} }
...@@ -3441,26 +3445,6 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, ...@@ -3441,26 +3445,6 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
} }
#endif #endif
/* Program the E2 parsing-BD word for a GSO packet: store the MSS and,
 * for IPv6 (outer or inner, depending on encapsulation), flag the
 * presence of an extension header so FW parses past it.
 */
static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
				 u32 xmit_type)
{
	struct ipv6hdr *ip6 = NULL;

	/* MSS lives in the LSO_MSS field of the parsing data word */
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			 ETH_TX_PARSE_BD_E2_LSO_MSS;

	/* Pick the relevant IPv6 header: inner for encapsulated GSO,
	 * outer for plain IPv6 GSO, none otherwise.
	 */
	if (xmit_type & XMIT_GSO_ENC_V6)
		ip6 = inner_ipv6_hdr(skb);
	else if (xmit_type & XMIT_GSO_V6)
		ip6 = ipv6_hdr(skb);

	if (ip6 && ip6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/** /**
* bnx2x_set_pbd_gso - update PBD in GSO case. * bnx2x_set_pbd_gso - update PBD in GSO case.
* *
...@@ -3470,7 +3454,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, ...@@ -3470,7 +3454,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
*/ */
static void bnx2x_set_pbd_gso(struct sk_buff *skb, static void bnx2x_set_pbd_gso(struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd, struct eth_tx_parse_bd_e1x *pbd,
struct eth_tx_start_bd *tx_start_bd,
u32 xmit_type) u32 xmit_type)
{ {
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
...@@ -3483,9 +3466,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, ...@@ -3483,9 +3466,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0)); 0, IPPROTO_TCP, 0));
/* GSO on 57710/57711 needs FW to calculate IP checksum */
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
} else { } else {
pbd->tcp_pseudo_csum = pbd->tcp_pseudo_csum =
bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
...@@ -3657,18 +3637,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, ...@@ -3657,18 +3637,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
(__force u32)iph->tot_len - (__force u32)iph->tot_len -
(__force u32)iph->frag_off; (__force u32)iph->frag_off;
outerip_len = iph->ihl << 1;
pbd2->fw_ip_csum_wo_len_flags_frag = pbd2->fw_ip_csum_wo_len_flags_frag =
bswab16(csum_fold((__force __wsum)csum)); bswab16(csum_fold((__force __wsum)csum));
} else { } else {
pbd2->fw_ip_hdr_to_payload_w = pbd2->fw_ip_hdr_to_payload_w =
hlen_w - ((sizeof(struct ipv6hdr)) >> 1); hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
pbd_e2->data.tunnel_data.flags |=
1 /*IPv6*/ << ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
} }
pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) { /* inner IP header info */
if (xmit_type & XMIT_CSUM_ENC_V4) {
pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
pbd_e2->data.tunnel_data.pseudo_csum = pbd_e2->data.tunnel_data.pseudo_csum =
...@@ -3676,8 +3661,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, ...@@ -3676,8 +3661,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
inner_ip_hdr(skb)->saddr, inner_ip_hdr(skb)->saddr,
inner_ip_hdr(skb)->daddr, inner_ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0)); 0, IPPROTO_TCP, 0));
outerip_len = ip_hdr(skb)->ihl << 1;
} else { } else {
pbd_e2->data.tunnel_data.pseudo_csum = pbd_e2->data.tunnel_data.pseudo_csum =
bswab16(~csum_ipv6_magic( bswab16(~csum_ipv6_magic(
...@@ -3690,8 +3673,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, ...@@ -3690,8 +3673,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
*global_data |= *global_data |=
outerip_off | outerip_off |
(!!(xmit_type & XMIT_CSUM_V6) <<
ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
(outerip_len << (outerip_len <<
ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
...@@ -3703,6 +3684,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, ...@@ -3703,6 +3684,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
} }
} }
/* If the GSO packet's IPv6 header (inner for encapsulated traffic,
 * outer otherwise) carries an extension header, set the corresponding
 * flag in the E2 parsing data so FW can locate the transport header.
 * Non-IPv6-GSO packets are left untouched.
 */
static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{
	struct ipv6hdr *ip6;

	if (xmit_type & XMIT_GSO_ENC_V6)
		ip6 = inner_ipv6_hdr(skb);
	else if (xmit_type & XMIT_GSO_V6)
		ip6 = ipv6_hdr(skb);
	else
		return;	/* not IPv6 GSO — nothing to flag */

	if (ip6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/* called with netif_tx_lock /* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue() * netif_wake_queue()
...@@ -3919,6 +3917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3919,6 +3917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
xmit_type); xmit_type);
} }
bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
/* Add the macs to the parsing BD if this is a vf or if /* Add the macs to the parsing BD if this is a vf or if
* Tx Switching is enabled. * Tx Switching is enabled.
*/ */
...@@ -3984,10 +3983,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -3984,10 +3983,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod); bd_prod);
} }
if (!CHIP_IS_E1x(bp)) if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, pbd_e2_parsing_data |=
xmit_type); (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
else else
bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
} }
/* Set the PBD's parsing_data field if not zero /* Set the PBD's parsing_data field if not zero
......
...@@ -932,8 +932,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp) ...@@ -932,8 +932,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */ else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR; start_params->network_cos_mode = FW_WRR;
start_params->gre_tunnel_mode = L2GRE_TUNNEL; start_params->tunnel_mode = TUNN_MODE_GRE;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; start_params->gre_tunnel_type = IPGRE_TUNNEL;
start_params->inner_gre_rss_en = 1;
return bnx2x_func_state_change(bp, &func_params); return bnx2x_func_state_change(bp, &func_params);
} }
......
...@@ -7647,7 +7647,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) ...@@ -7647,7 +7647,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
/* Function parameters */ /* Function parameters */
switch_update_params->suspend = suspend; __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
&switch_update_params->changes);
if (suspend)
__set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
&switch_update_params->changes);
rc = bnx2x_func_state_change(bp, &func_params); rc = bnx2x_func_state_change(bp, &func_params);
......
...@@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp, ...@@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw; struct bnx2x_raw_obj *r = &o->raw;
struct eth_rss_update_ramrod_data *data = struct eth_rss_update_ramrod_data *data =
(struct eth_rss_update_ramrod_data *)(r->rdata); (struct eth_rss_update_ramrod_data *)(r->rdata);
u16 caps = 0;
u8 rss_mode = 0; u8 rss_mode = 0;
int rc; int rc;
...@@ -4042,28 +4043,27 @@ static int bnx2x_setup_rss(struct bnx2x *bp, ...@@ -4042,28 +4043,27 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
/* RSS capabilities */ /* RSS capabilities */
if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
data->capabilities |= caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
data->capabilities |= caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
data->capabilities |= caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
data->capabilities |= caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
data->capabilities |= caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
data->capabilities |= caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
data->capabilities = cpu_to_le16(caps);
/* Hashing mask */ /* Hashing mask */
data->rss_result_mask = p->rss_result_mask; data->rss_result_mask = p->rss_result_mask;
...@@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, ...@@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
test_bit(BNX2X_Q_FLG_FCOE, flags) ? test_bit(BNX2X_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
} }
...@@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, ...@@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
tx_data->force_default_pri_flg = tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
tx_data->refuse_outband_vlan_flg =
test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
tx_data->tunnel_lso_inc_ip_id = tx_data->tunnel_lso_inc_ip_id =
test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
tx_data->tunnel_non_lso_pcsum_location = tx_data->tunnel_non_lso_pcsum_location =
test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT : test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
PCSUM_ON_BD; CSUM_ON_BD;
tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index; tx_data->tx_sb_index_number = params->sb_cq_index;
...@@ -5652,8 +5655,11 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, ...@@ -5652,8 +5655,11 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp); rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode; rdata->network_cos_mode = start_params->network_cos_mode;
rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; rdata->tunnel_mode = start_params->tunnel_mode;
rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; rdata->gre_tunnel_type = start_params->gre_tunnel_type;
rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
rdata->vxlan_dst_port = cpu_to_le16(4789);
rdata->sd_vlan_eth_type = cpu_to_le16(0x8100);
/* No need for an explicit memory barrier here as long we would /* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element * need to ensure the ordering of writing to the SPQ element
...@@ -5680,8 +5686,28 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, ...@@ -5680,8 +5686,28 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata)); memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */ /* Fill the ramrod data with provided parameters */
if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
&switch_update_params->changes)) {
rdata->tx_switch_suspend_change_flg = 1; rdata->tx_switch_suspend_change_flg = 1;
rdata->tx_switch_suspend = switch_update_params->suspend; rdata->tx_switch_suspend =
test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
&switch_update_params->changes);
}
if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes)) {
rdata->update_tunn_cfg_flg = 1;
if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
&switch_update_params->changes))
rdata->tunn_clss_en = 1;
if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
&switch_update_params->changes))
rdata->inner_gre_rss_en = 1;
rdata->tunnel_mode = switch_update_params->tunnel_mode;
rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
rdata->vxlan_dst_port = cpu_to_le16(4789);
}
rdata->echo = SWITCH_UPDATE; rdata->echo = SWITCH_UPDATE;
/* No need for an explicit memory barrier here as long as we /* No need for an explicit memory barrier here as long as we
......
...@@ -711,6 +711,7 @@ enum { ...@@ -711,6 +711,7 @@ enum {
BNX2X_RSS_IPV6, BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_TCP,
BNX2X_RSS_IPV6_UDP, BNX2X_RSS_IPV6_UDP,
BNX2X_RSS_GRE_INNER_HDRS,
}; };
struct bnx2x_config_rss_params { struct bnx2x_config_rss_params {
...@@ -831,6 +832,7 @@ enum { ...@@ -831,6 +832,7 @@ enum {
BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM, BNX2X_Q_FLG_SILENT_VLAN_REM,
BNX2X_Q_FLG_FORCE_DEFAULT_PRI, BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
BNX2X_Q_FLG_PCSUM_ON_PKT, BNX2X_Q_FLG_PCSUM_ON_PKT,
BNX2X_Q_FLG_TUN_INC_INNER_IP_ID BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
}; };
...@@ -1085,6 +1087,16 @@ struct bnx2x_queue_sp_obj { ...@@ -1085,6 +1087,16 @@ struct bnx2x_queue_sp_obj {
}; };
/********************** Function state update *********************************/ /********************** Function state update *********************************/
/* UPDATE command options */
enum {
BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
};
/* Allowed Function states */ /* Allowed Function states */
enum bnx2x_func_state { enum bnx2x_func_state {
BNX2X_F_STATE_RESET, BNX2X_F_STATE_RESET,
...@@ -1146,18 +1158,25 @@ struct bnx2x_func_start_params { ...@@ -1146,18 +1158,25 @@ struct bnx2x_func_start_params {
/* Function cos mode */ /* Function cos mode */
u8 network_cos_mode; u8 network_cos_mode;
/* NVGRE classification enablement */ /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
u8 nvgre_clss_en; u8 tunnel_mode;
/* tunneling classification enablement */
u8 tunn_clss_en;
/* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
u8 gre_tunnel_mode; u8 gre_tunnel_type;
/* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ /* Enables Inner GRE RSS on the function, depends on the client RSS
 u8 gre_tunnel_rss; /* Enables Inner GRE RSS on the function, depends on the client RSS
 * capabilities
 */
u8 inner_gre_rss_en;
}; };
struct bnx2x_func_switch_update_params { struct bnx2x_func_switch_update_params {
u8 suspend; unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
u8 tunnel_mode;
u8 gre_tunnel_type;
}; };
struct bnx2x_func_afex_update_params { struct bnx2x_func_afex_update_params {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment