Commit e918c7bb authored by Paolo Abeni

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================

This series contains updates to ice driver only.

Lukasz removes unnecessary argument from ice_fdir_comp_rules().

Jakub adds support for ethtool 'ether' flow-type rules.

Jake moves setting of VF MSI-X value to initialization function and adds
tracking of VF relative MSI-X index.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: store VF relative MSI-X index in q_vector->vf_reg_idx
  ice: set vf->num_msix in ice_initialize_vf_entry()
  ice: Implement 'flow-type ether' rules
  ice: Remove unnecessary argument from ice_fdir_comp_rules()
====================

Link: https://lore.kernel.org/r/20240412210534.916756-1-anthony.l.nguyen@intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 81b095ca b80d01ef
......@@ -459,7 +459,7 @@ struct ice_q_vector {
struct ice_vsi *vsi;
u16 v_idx; /* index in the vsi->q_vector array. */
u16 reg_idx;
u16 reg_idx; /* PF relative register index */
u8 num_ring_rx; /* total number of Rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 wb_on_itr:1; /* if true, WB on ITR is enabled */
......@@ -481,6 +481,7 @@ struct ice_q_vector {
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
u16 vf_reg_idx; /* VF relative register index */
struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
......
......@@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->irq.index = -ENOENT;
if (vsi->type == ICE_VSI_VF) {
q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
......@@ -145,6 +145,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
......
......@@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = {
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
switch (flow) {
case ICE_FLTR_PTYPE_NONF_ETH:
return ETHER_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
return TCP_V4_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
......@@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
switch (eth) {
case ETHER_FLOW:
return ICE_FLTR_PTYPE_NONF_ETH;
case TCP_V4_FLOW:
return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
case UDP_V4_FLOW:
......@@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
case ETHER_FLOW:
fsp->h_u.ether_spec = rule->eth;
fsp->m_u.ether_spec = rule->eth_mask;
break;
case IPV4_USER_FLOW:
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = 0;
......@@ -1193,6 +1201,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
return 0;
}
/**
* ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
* @dev: network interface device structure
* @fsp: pointer to ethtool Rx flow specification
*
* Return: true if vlan data is valid, false otherwise
*/
static bool ice_fdir_vlan_valid(struct device *dev,
struct ethtool_rx_flow_spec *fsp)
{
if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
return false;
if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
return false;
/* proto and vlan must have vlan-etype defined */
if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
!fsp->m_ext.vlan_etype) {
dev_warn(dev, "Filter with proto and vlan require also vlan-etype");
return false;
}
return true;
}
/**
* ice_set_ether_flow_seg - set address and protocol segments for ether flow
* @dev: network interface device structure
* @seg: flow segment for programming
* @eth_spec: mask data from ethtool
*
* Return: 0 on success and errno in case of error.
*/
static int ice_set_ether_flow_seg(struct device *dev,
struct ice_flow_seg_info *seg,
struct ethhdr *eth_spec)
{
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
/* empty rules are not valid */
if (is_zero_ether_addr(eth_spec->h_source) &&
is_zero_ether_addr(eth_spec->h_dest) &&
!eth_spec->h_proto)
return -EINVAL;
/* Ethertype */
if (eth_spec->h_proto == htons(0xFFFF)) {
ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
} else if (eth_spec->h_proto) {
dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
return -EOPNOTSUPP;
}
/* Source MAC address */
if (is_broadcast_ether_addr(eth_spec->h_source))
ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
else if (!is_zero_ether_addr(eth_spec->h_source))
goto err_mask;
/* Destination MAC address */
if (is_broadcast_ether_addr(eth_spec->h_dest))
ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
else if (!is_zero_ether_addr(eth_spec->h_dest))
goto err_mask;
return 0;
err_mask:
dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
return -EOPNOTSUPP;
}
/**
* ice_set_fdir_vlan_seg - set vlan segments for ether flow
* @seg: flow segment for programming
* @ext_masks: masks for additional RX flow fields
*
* Return: 0 on success and errno in case of error.
*/
static int
ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
struct ethtool_flow_ext *ext_masks)
{
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
if (ext_masks->vlan_etype) {
if (ext_masks->vlan_etype != htons(0xFFFF))
return -EOPNOTSUPP;
ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
}
if (ext_masks->vlan_tci) {
if (ext_masks->vlan_tci != htons(0xFFFF))
return -EOPNOTSUPP;
ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
}
return 0;
}
/**
* ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
* @pf: PF structure
......@@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
struct device *dev = ice_pf_to_dev(pf);
enum ice_fltr_ptype fltr_idx;
struct ice_hw *hw = &pf->hw;
bool perfect_filter;
bool perfect_filter = false;
int ret;
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
......@@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
&perfect_filter);
break;
case ETHER_FLOW:
ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
if (!ice_fdir_vlan_valid(dev, fsp)) {
ret = -EINVAL;
break;
}
ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
}
break;
default:
ret = -EINVAL;
}
......@@ -1823,6 +1957,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
break;
case ETHER_FLOW:
input->eth = fsp->h_u.ether_spec;
input->eth_mask = fsp->m_u.ether_spec;
break;
default:
/* not doing un-parsed flow types */
return -EINVAL;
......
......@@ -4,6 +4,8 @@
#include "ice_common.h"
/* These are training packet headers used to program flow director filters. */
static const u8 ice_fdir_eth_pkt[22];
static const u8 ice_fdir_tcpv4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
......@@ -416,6 +418,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = {
/* Flow Director no-op training packet table */
static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
{
ICE_FLTR_PTYPE_NONF_ETH,
sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
......@@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* perspective. The input from user is from Rx filter perspective.
*/
switch (flow) {
case ICE_FLTR_PTYPE_NONF_ETH:
ice_pkt_insert_mac_addr(loc, input->eth.h_dest);
ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source);
if (input->ext_data.vlan_tag || input->ext_data.vlan_type) {
ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
input->ext_data.vlan_type);
ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET,
input->ext_data.vlan_tag);
ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET,
input->eth.h_proto);
} else {
ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
input->eth.h_proto);
}
break;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
......@@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
* ice_fdir_comp_rules - compare 2 filters
* @a: a Flow Director filter data structure
* @b: a Flow Director filter data structure
* @v6: bool true if v6 filter
*
* Returns true if the filters match
*/
static bool
ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
enum ice_fltr_ptype flow_type = a->flow_type;
/* The calling function already checks that the two filters have the
* same flow_type.
*/
if (!v6) {
if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
a->ip.v4.src_ip == b->ip.v4.src_ip &&
a->ip.v4.dst_port == b->ip.v4.dst_port &&
a->ip.v4.src_port == b->ip.v4.src_port)
return true;
} else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
a->ip.v4.src_ip == b->ip.v4.src_ip &&
a->ip.v4.l4_header == b->ip.v4.l4_header &&
a->ip.v4.proto == b->ip.v4.proto &&
a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
a->ip.v4.tos == b->ip.v4.tos)
return true;
}
} else {
if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
a->ip.v6.src_port == b->ip.v6.src_port &&
!ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
b->ip.v6.dst_ip) &&
!ice_cmp_ipv6_addr(a->ip.v6.src_ip,
b->ip.v6.src_ip))
return true;
} else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
a->ip.v6.src_port == b->ip.v6.src_port)
return true;
}
switch (flow_type) {
case ICE_FLTR_PTYPE_NONF_ETH:
if (!memcmp(&a->eth, &b->eth, sizeof(a->eth)))
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
a->ip.v4.src_ip == b->ip.v4.src_ip &&
a->ip.v4.dst_port == b->ip.v4.dst_port &&
a->ip.v4.src_port == b->ip.v4.src_port)
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
a->ip.v4.src_ip == b->ip.v4.src_ip &&
a->ip.v4.l4_header == b->ip.v4.l4_header &&
a->ip.v4.proto == b->ip.v4.proto &&
a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
a->ip.v4.tos == b->ip.v4.tos)
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
a->ip.v6.src_port == b->ip.v6.src_port &&
!ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
b->ip.v6.dst_ip) &&
!ice_cmp_ipv6_addr(a->ip.v6.src_ip,
b->ip.v6.src_ip))
return true;
break;
case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
a->ip.v6.src_port == b->ip.v6.src_port)
return true;
break;
default:
break;
}
return false;
......@@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
bool ret = false;
list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
enum ice_fltr_ptype flow_type;
if (rule->flow_type != input->flow_type)
continue;
flow_type = input->flow_type;
if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
ret = ice_fdir_comp_rules(rule, input, false);
else
ret = ice_fdir_comp_rules(rule, input, true);
ret = ice_fdir_comp_rules(rule, input);
if (ret) {
if (rule->fltr_id == input->fltr_id &&
rule->q_index != input->q_index)
......
......@@ -8,6 +8,9 @@
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
/* macros for offsets into packets for flow director programming */
#define ICE_ETH_TYPE_F_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_ETH_TYPE_VLAN_OFFSET 16
#define ICE_IPV4_SRC_ADDR_OFFSET 26
#define ICE_IPV4_DST_ADDR_OFFSET 30
#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
......@@ -159,6 +162,8 @@ struct ice_fdir_fltr {
struct list_head fltr_node;
enum ice_fltr_ptype flow_type;
struct ethhdr eth, eth_mask;
union {
struct ice_fdir_v4 v4;
struct ice_fdir_v6 v6;
......
......@@ -360,13 +360,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
* @vf: VF to calculate the register index for
* @q_vector: a q_vector associated to the VF
*/
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
if (!vf || !q_vector)
return -EINVAL;
return;
/* always add one to account for the OICR being the first MSIX */
return vf->first_vector_idx + q_vector->v_idx + 1;
q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}
/**
......@@ -831,11 +832,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
pci_dev_get(vfdev);
/* set default number of MSI-X */
vf->num_msix = pf->vfs.num_msix_per;
vf->num_vf_qs = pf->vfs.num_qps_per;
ice_vc_set_default_allowlist(vf);
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
......
......@@ -49,7 +49,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
......@@ -130,11 +130,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
static inline int
static inline void
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
return 0;
}
static inline int
......
......@@ -203,6 +203,7 @@ struct ice_phy_info {
enum ice_fltr_ptype {
/* NONE - used for undef/error */
ICE_FLTR_PTYPE_NONF_NONE = 0,
ICE_FLTR_PTYPE_NONF_ETH,
ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
......
......@@ -992,10 +992,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
/* assign default capabilities */
vf->spoofchk = true;
vf->num_vf_qs = vfs->num_qps_per;
ice_vc_set_default_allowlist(vf);
ice_virtchnl_set_dflt_ops(vf);
/* set default number of MSI-X */
vf->num_msix = vfs->num_msix_per;
vf->num_vf_qs = vfs->num_qps_per;
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
......
......@@ -1505,13 +1505,12 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
* ice_cfg_interrupt
* @vf: pointer to the VF info
* @vsi: the VSI being configured
* @vector_id: vector ID
* @map: vector map for mapping vectors to queues
* @q_vector: structure for interrupt vector
* configure the IRQ to queue map
*/
static int
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
static enum virtchnl_status_code
ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
struct virtchnl_vector_map *map,
struct ice_q_vector *q_vector)
{
......@@ -1531,7 +1530,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
ice_cfg_rxq_interrupt(vsi, vsi_q_id,
q_vector->vf_reg_idx,
q_vector->rx.itr_idx);
}
......@@ -1545,7 +1545,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
ice_cfg_txq_interrupt(vsi, vsi_q_id,
q_vector->vf_reg_idx,
q_vector->tx.itr_idx);
}
......@@ -1619,8 +1620,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
/* lookout for the invalid queue index */
v_ret = (enum virtchnl_status_code)
ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
if (v_ret)
goto error_param;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment