Commit 071d08af authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-03-22

This series contains updates to ice driver only.

Akeem enables MAC anti-spoofing by default when a new VSI is being
created.  Fixes an issue with reclaiming VF resources back to the pool
after reset by freeing VF resources separately, using the first VF
vector index to traverse the list instead of starting at the last
assigned vector.  Adds support for VF & PF promiscuous mode in the
ice driver.  Stops the PF driver from letting the VF know it is "not
trusted" when it attempts to add more than its permitted additional
MAC addresses.  Alters how the driver gets the VF VSI instances;
instead of using mailbox messages to retrieve VSIs, it gets them
directly via the VF object in the PF data structure.
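
For reference, the anti-spoofing default boils down to marking the VSI
security section during init; condensed from the ice_vsi_init() hunk
below:

  if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
          ctxt->info.valid_sections |=
                  cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
          ctxt->info.sec_flags |=
                  ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
  }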

Bruce fixes return values to resolve static analysis warnings and
makes whitespace changes to increase readability and reduce code
wrapping.

Anirudh cleans up code by removing a function prototype that was never
implemented and removing an unused field in the ice_sched_vsi_info
structure.

Kiran fixes a potential divide by zero issue by adding a check.
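
The fix follows the usual pattern of validating the divisor before
dividing; a minimal sketch (hypothetical helper, not the literal driver
code):

  /* Illustrative only: refuse to divide when the divisor is zero */
  static enum ice_status ice_checked_div(u32 dividend, u32 divisor, u32 *res)
  {
          if (!divisor)
                  return ICE_ERR_PARAM;
          *res = dividend / divisor;
          return 0;
  }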

Victor cleans up the transmit scheduler by adjusting stack variable
usage, and adds/modifies debug prints to make them more useful.

Yashaswini updates the driver in VEB mode to ensure that the LAN_EN
bit is set only when the right conditions are met: the lookup is a
directional one (ethertype, promiscuous, ethertype-MAC,
promiscuous-VLAN or default-port), a VLAN lookup, or a MAC/MAC-VLAN
lookup with a multicast or broadcast address; in VEPA mode LAN_EN is
always set.

Christopher ensures the loopback enable bit is not set for prune switch
rules, since otherwise all transmit traffic would be looped back to the
internal switch and dropped.
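
This amounts to the one-line condition applied in ice_fill_sw_info()
(quoted from the hunk below):

  /* Setting LB for prune actions will result in replicated
   * packets to the internal switch that will be dropped.
   */
  if (fi->lkup_type != ICE_SW_LKUP_VLAN)
          fi->lb_en = true;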
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bdaba895 f1ef73f5
......@@ -125,6 +125,23 @@ extern const char ice_drv_ver[];
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | \
ICE_PROMISC_MCAST_RX | \
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_MCAST_RX | \
ICE_PROMISC_VLAN_TX | \
ICE_PROMISC_VLAN_RX)
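/* Illustration (not part of the patch): ice_vsi_sync_fltr() below picks
 * one of these aggregate masks depending on whether VLANs are active:
 *
 *	u8 promisc_m = vsi->vlan_ena ? ICE_MCAST_VLAN_PROMISC_BITS
 *				     : ICE_MCAST_PROMISC_BITS;
 *	err = ice_cfg_promisc(vsi, promisc_m, true);
 */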
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
......@@ -258,6 +275,7 @@ struct ice_vsi {
u8 irqs_ready;
u8 current_isup; /* Sync 'link up' logging */
u8 stat_offsets_loaded;
u8 vlan_ena;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
......@@ -367,7 +385,8 @@ struct ice_netdev_priv {
* @vsi: pointer to vsi struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
......
......@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
return status;
return 0;
}
/**
......@@ -1100,8 +1100,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Dumps debug log about control command with descriptor contents.
*/
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
void *buf, u16 buf_len)
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
......@@ -1620,8 +1621,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* @hw: pointer to the hardware structure
* @opc: capabilities type to discover - pass in the command opcode
*/
static enum ice_status ice_discover_caps(struct ice_hw *hw,
enum ice_adminq_opc opc)
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
enum ice_status status;
u32 cap_count;
......@@ -2548,8 +2549,8 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
......@@ -2587,8 +2588,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
......@@ -2630,8 +2631,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
......@@ -2681,8 +2682,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
......@@ -3026,7 +3027,8 @@ void ice_replay_post(struct ice_hw *hw)
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
void
ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
u64 new_data;
......@@ -3057,7 +3059,8 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat)
{
u32 new_data;
......
......@@ -9,8 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
u16 buf_len);
void
ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
......@@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
u16 *data);
enum ice_status
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
......@@ -106,8 +106,10 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
void
ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
#endif /* _ICE_COMMON_H_ */
......@@ -1156,7 +1156,8 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
*
* Reports speed/duplex settings based on media_type
*/
static int ice_get_link_ksettings(struct net_device *netdev,
static int
ice_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
......@@ -1565,7 +1566,8 @@ ice_set_link_ksettings(struct net_device *netdev,
*
* Returns Success if the command is supported.
*/
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
static int
ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 __always_unused *rule_locs)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
......@@ -2023,8 +2025,9 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
static int
ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
const u8 hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
......@@ -2179,7 +2182,8 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_get_coalesce(netdev, ec, -1);
}
static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
static int
ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, q_num);
......@@ -2324,7 +2328,8 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_set_coalesce(netdev, ec, -1);
}
static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
static int
ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
......
......@@ -297,13 +297,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
* @vf_id: ID of the VF being configured
*/
static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
struct ice_pf *pf = vsi->back;
struct ice_vf *vf = NULL;
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
......@@ -311,8 +317,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
vsi->alloc_txq = pf->num_vf_qps;
vsi->alloc_rxq = pf->num_vf_qps;
vf = &pf->vf[vsi->vf_id];
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 from the original vector
......@@ -472,10 +479,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
* @vf_id: Id of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
struct ice_vsi *vsi = NULL;
......@@ -501,7 +510,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;
ice_vsi_set_num_qs(vsi);
if (type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
case ICE_VSI_PF:
......@@ -869,7 +881,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
/* If offset is non-zero, it was calculated correctly from the
* enabled TCs for this VSI.  Otherwise qcount_rx is always correct
* and non-zero because it is based on the VSI's allocated Rx
* queues, which is at least 1 (hence qcount_tx will be at least 1).
*/
if (offset)
vsi->num_rxq = offset;
else
vsi->num_rxq = qcount_rx;
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
......@@ -940,6 +963,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
if (!ctxt)
return -ENOMEM;
ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
......@@ -965,6 +989,14 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
/* Enable MAC Antispoof with new VSI being initialized or updated */
if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
ctxt->info.sec_flags |=
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
}
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
......@@ -2075,8 +2107,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* @rst_src: reset source
* @rel_vmvf_num: Relative id of VF/VM
*/
int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
0);
......@@ -2086,10 +2119,11 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
* @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
*
* returns 0 if VSI is updated, negative otherwise
*/
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
struct device *dev;
......@@ -2117,7 +2151,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
if (!vlan_promisc)
ctxt->info.valid_sections =
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
......@@ -2162,7 +2198,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
vsi = ice_vsi_alloc(pf, type);
if (type == ICE_VSI_VF)
vsi = ice_vsi_alloc(pf, type, vf_id);
else
vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
......@@ -2646,6 +2686,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vf *vf = NULL;
struct ice_pf *pf;
int ret, i;
......@@ -2653,16 +2694,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
if (vsi->type == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
vsi->sw_base_vector = 0;
/* reclaim HW interrupts back to the common pool */
ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
} else {
/* Reclaim VF resources back to the common pool for reset and
* rebuild, with vector reassignment
*/
ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
vsi->idx);
pf->num_avail_hw_msix += pf->num_vf_msix;
}
vsi->hw_base_vector = 0;
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
if (vsi->type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
......
......@@ -35,7 +35,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_vsi_delete(struct ice_vsi *vsi);
......
......@@ -167,6 +167,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}
/**
* ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
* @vsi: the VSI being configured
* @promisc_m: mask of promiscuous config bits
* @set_promisc: enable or disable promisc flag request
*
*/
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
struct ice_hw *hw = &vsi->back->hw;
enum ice_status status = 0;
if (vsi->type != ICE_VSI_PF)
return 0;
if (vsi->vlan_ena) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
set_promisc);
} else {
if (set_promisc)
status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
0);
else
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
0);
}
if (status)
return -EIO;
return 0;
}
/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
......@@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
struct ice_hw *hw = &pf->hw;
enum ice_status status = 0;
u32 changed_flags = 0;
u8 promisc_m;
int err = 0;
if (!vsi->netdev)
......@@ -245,8 +279,35 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI)
netdev_warn(netdev, "Unsupported configuration\n");
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
err = ice_cfg_promisc(vsi, promisc_m, true);
if (err) {
netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
} else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
err = ice_cfg_promisc(vsi, promisc_m, false);
if (err) {
netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_ALLMULTI;
goto out_promisc;
}
}
}
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
......@@ -1113,7 +1174,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
*/
static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
static void
ice_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct ice_q_vector *q_vector =
......@@ -1658,11 +1720,13 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding vlan ids
*/
static int ice_vlan_rx_add_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
static int
ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
......@@ -1675,8 +1739,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
int ret = ice_cfg_vlan_pruning(vsi, true);
ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
......@@ -1685,7 +1748,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
return ice_vsi_add_vlan(vsi, vid);
ret = ice_vsi_add_vlan(vsi, vid);
if (!ret) {
vsi->vlan_ena = true;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}
return ret;
}
/**
......@@ -1696,12 +1765,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
*
* net_device_ops implementation for removing vlan ids
*/
static int ice_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
static int
ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int status;
int ret;
if (vsi->info.pvid)
return -EINVAL;
......@@ -1709,15 +1779,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
status = ice_vsi_kill_vlan(vsi, vid);
if (status)
return status;
ret = ice_vsi_kill_vlan(vsi, vid);
if (ret)
return ret;
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
status = ice_cfg_vlan_pruning(vsi, false);
ret = ice_cfg_vlan_pruning(vsi, false, false);
return status;
vsi->vlan_ena = false;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
return ret;
}
/**
......@@ -2057,8 +2129,8 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
*
* Returns 0 on success, negative on failure
*/
static int ice_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
struct ice_pf *pf;
......@@ -2493,7 +2565,8 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @addr: the MAC address entry being added
* @vid: VLAN id
*/
static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
__always_unused u16 vid)
{
......@@ -2519,8 +2592,8 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*/
static int ice_set_features(struct net_device *netdev,
netdev_features_t features)
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
......@@ -2647,7 +2720,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_service_task_schedule(pf);
return err;
return 0;
}
/**
......@@ -2674,8 +2747,8 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that needs to be performed to read u64 values in 32 bit machine.
*/
static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
u64 *bytes)
static void
ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
unsigned int start;
*pkts = 0;
......
......@@ -276,7 +276,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
......@@ -360,12 +361,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
enum ice_status status;
status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
if (status)
ice_debug(hw, ICE_DBG_SCHED,
"remove element failed %d\n", status);
ice_sched_remove_elems(hw, node->parent, 1, &teid);
}
parent = node->parent;
/* root has no parent */
......@@ -697,7 +694,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
if (status || num_groups_added != 1) {
ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG;
}
......@@ -1502,7 +1500,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsi_ctx->sched.max_lanq[tc] = new_numqs;
return status;
return 0;
}
/**
......@@ -1527,6 +1525,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
......@@ -1646,8 +1645,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
u8 i, j = 0;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
......@@ -1657,6 +1657,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
struct ice_sched_node *vsi_node, *tc_node;
u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
......
......@@ -322,8 +322,8 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
*
* save the VSI context entry for a given VSI handle
*/
static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
struct ice_vsi_ctx *vsi)
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
hw->vsi_ctx[vsi_handle] = vsi;
}
......@@ -398,7 +398,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
return status;
return 0;
}
/**
......@@ -643,22 +643,44 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Setting LB for prune actions will result in replicated
* packets to the internal switch that will be dropped.
*/
if (fi->lkup_type != ICE_SW_LKUP_VLAN)
fi->lb_en = true;
/* Do not set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
* 2.1 The lookup is MAC with unicast addr for MAC, OR
* 2.2 The lookup is MAC_VLAN with unicast addr for MAC
*
* In all other cases, the LAN enable has to be set to true.
*/
if (!(hw->evb_veb &&
((fi->lkup_type == ICE_SW_LKUP_MAC &&
is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
fi->lan_en = true;
/* Set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
* 2.1 The lookup is a directional lookup like ethertype,
* promiscuous, ethertype-mac, promiscuous-vlan
* and default-port OR
* 2.2 The lookup is VLAN, OR
* 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
* 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
*
* OR
*
* The switch is a VEPA.
*
* In all other cases, the LAN enable has to be set to false.
*/
if (hw->evb_veb) {
if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
fi->lkup_type == ICE_SW_LKUP_PROMISC ||
fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
fi->lkup_type == ICE_SW_LKUP_DFLT ||
fi->lkup_type == ICE_SW_LKUP_VLAN ||
(fi->lkup_type == ICE_SW_LKUP_MAC &&
!is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
!is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
fi->lan_en = true;
} else {
fi->lan_en = true;
}
}
}
/**
......@@ -2189,6 +2211,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
return status;
}
/**
* ice_determine_promisc_mask
* @fi: filter info to parse
*
* Helper function to determine which ICE_PROMISC_ mask corresponds
* to the given filter info.
*/
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
{
u16 vid = fi->l_data.mac_vlan.vlan_id;
u8 *macaddr = fi->l_data.mac.mac_addr;
bool is_tx_fltr = false;
u8 promisc_mask = 0;
if (fi->flag == ICE_FLTR_TX)
is_tx_fltr = true;
if (is_broadcast_ether_addr(macaddr))
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
else if (is_multicast_ether_addr(macaddr))
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
else if (is_unicast_ether_addr(macaddr))
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
if (vid)
promisc_mask |= is_tx_fltr ?
ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
return promisc_mask;
}
/**
* ice_remove_promisc - Remove promisc based filter rules
* @hw: pointer to the hardware structure
* @recp_id: recipe ID for which the rule needs to be removed
* @v_list: list of promisc entries
*/
static enum ice_status
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
v_list_itr->status =
ice_remove_rule_internal(hw, recp_id, v_list_itr);
if (v_list_itr->status)
return v_list_itr->status;
}
return 0;
}
/**
* ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to clear mode
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *fm_entry, *tmp;
struct list_head remove_list_head;
struct ice_fltr_mgmt_list_entry *itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
if (vid)
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
else
recipe_id = ICE_SW_LKUP_PROMISC;
rule_head = &sw->recp_list[recipe_id].filt_rules;
rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
INIT_LIST_HEAD(&remove_list_head);
mutex_lock(rule_lock);
list_for_each_entry(itr, rule_head, list_entry) {
u8 fltr_promisc_mask = 0;
if (!ice_vsi_uses_fltr(itr, vsi_handle))
continue;
fltr_promisc_mask |=
ice_determine_promisc_mask(&itr->fltr_info);
/* Skip if filter is not completely specified by given mask */
if (fltr_promisc_mask & ~promisc_mask)
continue;
status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
&remove_list_head,
&itr->fltr_info);
if (status) {
mutex_unlock(rule_lock);
goto free_fltr_list;
}
}
mutex_unlock(rule_lock);
status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
free_fltr_list:
list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
list_del(&fm_entry->list_entry);
devm_kfree(ice_hw_to_dev(hw), fm_entry);
}
return status;
}
/**
* ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
enum ice_status status = 0;
bool is_tx_fltr;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&new_fltr, 0, sizeof(new_fltr));
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
new_fltr.l_data.mac_vlan.vlan_id = vid;
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
} else {
new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
recipe_id = ICE_SW_LKUP_PROMISC;
}
/* Separate filters must be set for each direction/packet type
* combination, so we will loop over the mask value, store the
* individual type, and clear it out in the input mask as it
* is found.
*/
while (promisc_mask) {
u8 *mac_addr;
pkt_type = 0;
is_tx_fltr = false;
if (promisc_mask & ICE_PROMISC_UCAST_RX) {
promisc_mask &= ~ICE_PROMISC_UCAST_RX;
pkt_type = UCAST_FLTR;
} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
promisc_mask &= ~ICE_PROMISC_UCAST_TX;
pkt_type = UCAST_FLTR;
is_tx_fltr = true;
} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
promisc_mask &= ~ICE_PROMISC_MCAST_RX;
pkt_type = MCAST_FLTR;
} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
promisc_mask &= ~ICE_PROMISC_MCAST_TX;
pkt_type = MCAST_FLTR;
is_tx_fltr = true;
} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
promisc_mask &= ~ICE_PROMISC_BCAST_RX;
pkt_type = BCAST_FLTR;
} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
promisc_mask &= ~ICE_PROMISC_BCAST_TX;
pkt_type = BCAST_FLTR;
is_tx_fltr = true;
}
/* Check for VLAN promiscuous flag */
if (promisc_mask & ICE_PROMISC_VLAN_RX) {
promisc_mask &= ~ICE_PROMISC_VLAN_RX;
} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
promisc_mask &= ~ICE_PROMISC_VLAN_TX;
is_tx_fltr = true;
}
/* Set filter DA based on packet type */
mac_addr = new_fltr.l_data.mac.mac_addr;
if (pkt_type == BCAST_FLTR) {
eth_broadcast_addr(mac_addr);
} else if (pkt_type == MCAST_FLTR ||
pkt_type == UCAST_FLTR) {
/* Use the dummy ether header DA */
ether_addr_copy(mac_addr, dummy_eth_header);
if (pkt_type == MCAST_FLTR)
mac_addr[0] |= 0x1; /* Set multicast bit */
}
/* Need to reset this to zero for all iterations */
new_fltr.flag = 0;
if (is_tx_fltr) {
new_fltr.flag |= ICE_FLTR_TX;
new_fltr.src = hw_vsi_id;
} else {
new_fltr.flag |= ICE_FLTR_RX;
new_fltr.src = hw->port_info->lport;
}
new_fltr.fltr_act = ICE_FWD_TO_VSI;
new_fltr.vsi_handle = vsi_handle;
new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
f_list_entry.fltr_info = new_fltr;
status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
if (status)
goto set_promisc_exit;
}
set_promisc_exit:
return status;
}
/**
* ice_set_vlan_vsi_promisc
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @rm_vlan_promisc: Clear VLANs VSI promisc mode
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *list_itr, *tmp;
struct list_head vsi_list_head;
struct list_head *vlan_head;
struct mutex *vlan_lock; /* Lock to protect filter rule list */
enum ice_status status;
u16 vlan_id;
INIT_LIST_HEAD(&vsi_list_head);
vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
mutex_lock(vlan_lock);
status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
&vsi_list_head);
mutex_unlock(vlan_lock);
if (status)
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
if (status)
break;
}
free_fltr_list:
list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
list_del(&list_itr->list_entry);
devm_kfree(ice_hw_to_dev(hw), list_itr);
}
return status;
}
/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
......@@ -2224,12 +2531,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
case ICE_SW_LKUP_VLAN:
ice_remove_vlan(hw, &remove_list_head);
break;
case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_PROMISC_VLAN:
ice_remove_promisc(hw, lkup, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_DFLT:
case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_LAST:
default:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
......
......@@ -178,6 +178,17 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
ICE_PROMISC_MCAST_RX = 0x4,
ICE_PROMISC_MCAST_TX = 0x8,
ICE_PROMISC_BCAST_RX = 0x10,
ICE_PROMISC_BCAST_TX = 0x20,
ICE_PROMISC_VLAN_RX = 0x40,
ICE_PROMISC_VLAN_TX = 0x80,
};
/* VSI related commands */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
......@@ -199,10 +210,22 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
......
......@@ -100,8 +100,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
int napi_budget)
static bool
ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = vsi->work_lmt;
......@@ -389,8 +389,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
* Returns true if the page was successfully allocated or
* reused.
*/
static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
struct ice_rx_buf *bi)
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
......@@ -510,8 +510,8 @@ static bool ice_page_is_reserved(struct page *page)
* The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter.
*/
static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
union ice_32b_rx_flex_desc *rx_desc,
static bool
ice_add_rx_frag(struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb)
{
#if (PAGE_SIZE < 8192)
......@@ -587,8 +587,8 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
*
* Synchronizes page for reuse by the adapter
*/
static void ice_reuse_rx_page(struct ice_ring *rx_ring,
struct ice_rx_buf *old_buf)
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
......@@ -613,8 +613,8 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
* correctly, as well as handling calling the page recycle function if
* necessary.
*/
static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc)
static struct sk_buff *
ice_fetch_rx_buf(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_rx_buf *rx_buf;
struct sk_buff *skb;
......@@ -751,8 +751,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
const u16 stat_err_bits)
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
return !!(rx_desc->wb.status_error0 &
cpu_to_le16(stat_err_bits));
......@@ -769,8 +769,8 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
*/
static bool ice_is_non_eop(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
......@@ -838,7 +838,8 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* skb->protocol must be set before this function is called
*/
static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
static void
ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
......@@ -909,7 +910,8 @@ static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
static void ice_process_skb_fields(struct ice_ring *rx_ring,
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u8 ptype)
{
......@@ -930,8 +932,8 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
* This function sends the completed packet (via. skb) up the stack using
* gro receive functions (with/without vlan tag)
*/
static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
u16 vlan_tag)
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK)) {
......
......@@ -248,7 +248,6 @@ struct ice_sched_vsi_info {
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 vsi_id;
};
/* driver defines the policy */
......
......@@ -495,6 +495,8 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
*/
static int ice_alloc_vf_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
int tx_rx_queue_left;
int status;
/* setup VF VSI and necessary resources */
......@@ -502,6 +504,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
if (status)
goto ice_alloc_vf_res_exit;
/* Update the number of VF queues, in case the VF requested
* queue changes
*/
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
......@@ -760,6 +771,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
* ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
* @vf: pointer to the VF info
* @vsi: the VSI being configured
* @promisc_m: mask of promiscuous config bits
* @rm_promisc: promisc flag request from the VF to remove or add filter
*
* This function configures VF VSI promiscuous mode, based on the VF requests,
* for Unicast, Multicast and VLAN
*/
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
bool rm_promisc)
{
struct ice_pf *pf = vf->pf;
enum ice_status status = 0;
struct ice_hw *hw;
hw = &pf->hw;
if (vf->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
} else if (vf->port_vlan_id) {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
vf->port_vlan_id);
else
status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
vf->port_vlan_id);
} else {
if (rm_promisc)
status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
0);
else
status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
0);
}
return status;
}
/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
......@@ -835,8 +887,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_free_vf_res(&pf->vf[v]);
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];
ice_free_vf_res(vf);
/* Free VF queues as well, and reallocate later.
* If a given VF has different number of queues
* configured, the request for update will come
* via mailbox communication.
*/
vf->num_vf_qs = 0;
}
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
......@@ -845,8 +907,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/* Finish the reset on each VF */
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_cleanup_and_realloc_vf(&pf->vf[v]);
for (v = 0; v < pf->num_alloc_vfs; v++) {
vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
dev_dbg(&pf->pdev->dev,
"VF-id %d has %d queues configured\n",
vf->vf_id, vf->num_vf_qs);
ice_cleanup_and_realloc_vf(vf);
}
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
......@@ -864,9 +933,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
struct ice_hw *hw;
bool rsd = false;
u8 promisc_m;
u32 reg;
int i;
......@@ -892,6 +962,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
vf->vf_id, NULL);
}
hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
*/
......@@ -917,6 +988,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
usleep_range(10000, 20000);
/* Disable promiscuous modes in case they were enabled;
* ignore any error if the disabling process failed
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
if (vf->port_vlan_id || vf->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
}
/* free VF resources to begin resetting the VSI state */
ice_free_vf_res(vf);
......@@ -1199,8 +1285,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf)
*
* send msg to VF
*/
static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum ice_status v_retval, u8 *msg, u16 msglen)
static int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum ice_status v_retval,
u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
struct ice_pf *pf;
......@@ -1303,6 +1390,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto err;
}
if (!vsi->info.pvid)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
......@@ -1436,6 +1528,7 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
enum ice_status aq_ret;
int ret;
......@@ -1449,7 +1542,7 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -1483,6 +1576,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
enum ice_status aq_ret;
int ret;
......@@ -1496,7 +1590,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -1531,6 +1625,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_eth_stats stats;
struct ice_vsi *vsi;
......@@ -1544,7 +1639,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -1573,6 +1668,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
......@@ -1590,7 +1686,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -1626,6 +1722,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
......@@ -1644,7 +1741,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -1710,7 +1807,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -1766,6 +1863,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
enum ice_status aq_ret = 0;
struct ice_vsi *vsi;
int i;
......@@ -1780,12 +1878,20 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
}
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, qci->num_queue_pairs);
aq_ret = ICE_ERR_PARAM;
goto error_param;
}
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
......@@ -1873,7 +1979,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
enum ice_status ret;
enum ice_status ret = 0;
LIST_HEAD(mac_list);
struct ice_vsi *vsi;
int mac_count = 0;
......@@ -1893,12 +1999,19 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(&pf->pdev->dev,
"Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
ret = ICE_ERR_PARAM;
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
/* There is no need to let the VF know it is not trusted to add
* more MAC addresses, so we can just return success.
*/
goto handle_mac_exit;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
ret = ICE_ERR_PARAM;
goto handle_mac_exit;
}
for (i = 0; i < al->num_elements; i++) {
u8 *maddr = al->list[i].addr;
......@@ -2013,6 +2126,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
int req_queues = vfres->num_queue_pairs;
enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;
......@@ -2021,22 +2135,24 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
cur_queues = pf->num_vf_qps;
cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
......@@ -2151,7 +2267,11 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
(struct virtchnl_vlan_filter_list *)msg;
enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
bool vlan_promisc = false;
struct ice_vsi *vsi;
struct ice_hw *hw;
int status = 0;
u8 promisc_m;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
......@@ -2167,8 +2287,11 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(&pf->pdev->dev,
"VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
aq_ret = ICE_ERR_PARAM;
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let the VF know about being not trusted,
* so we can just return success here
*/
goto error_param;
}
......@@ -2181,7 +2304,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
}
}
vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
......@@ -2200,19 +2324,41 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
vlan_promisc = true;
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
if (!ice_vsi_add_vlan(vsi, vid)) {
vf->num_vlan++;
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid))
if (ice_cfg_vlan_pruning(vsi, true))
aq_ret = ICE_ERR_PARAM;
} else {
aq_ret = ICE_ERR_PARAM;
}
if (ice_vsi_add_vlan(vsi, vid)) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
}
vf->num_vlan++;
/* Enable VLAN pruning when VLAN is added */
if (!vlan_promisc) {
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
aq_ret = ICE_ERR_PARAM;
dev_err(&pf->pdev->dev,
"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
}
} else {
/* Enable Ucast/Mcast VLAN promiscuous mode */
promisc_m = ICE_PROMISC_VLAN_TX |
ICE_PROMISC_VLAN_RX;
status = ice_set_vsi_promisc(hw, vsi->idx,
promisc_m, vid);
if (status)
dev_err(&pf->pdev->dev,
"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
}
} else {
......@@ -2222,12 +2368,22 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
if (!ice_vsi_kill_vlan(vsi, vid)) {
vf->num_vlan--;
/* Disable VLAN pruning when removing VLAN 0 */
if (unlikely(!vid))
ice_cfg_vlan_pruning(vsi, false);
}
if (ice_vsi_kill_vlan(vsi, vid)) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
}
vf->num_vlan--;
/* Disable VLAN pruning when removing VLAN */
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
if (vlan_promisc) {
promisc_m = ICE_PROMISC_VLAN_TX |
ICE_PROMISC_VLAN_RX;
ice_clear_vsi_promisc(hw, vsi->idx,
promisc_m, vid);
}
}
}
......@@ -2310,6 +2466,11 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = ICE_ERR_PARAM;
goto error_param;
}
if (ice_vsi_manage_vlan_stripping(vsi, false))
aq_ret = ICE_ERR_AQ_ERROR;
......@@ -2458,8 +2619,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
*
* return VF configuration
*/
int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
struct ifla_vf_info *ivi)
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
......
......@@ -70,6 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queues configured per VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
......@@ -77,8 +78,8 @@ struct ice_vf {
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
struct ifla_vf_info *ivi);
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
......@@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
u16 vlan_id, u8 qos, __be16 vlan_proto);
int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto);
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
......@@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
static inline int
ice_set_vf_bw(struct net_device __always_unused *netdev,
int __always_unused vf_id, int __always_unused min_tx_rate,
int __always_unused max_tx_rate)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */