Commit b57e0d48 authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: switchdev bridge offload

Wojciech Drewek says:

Linux bridge provides the ability to learn MAC addresses and VLANs
detected on the bridge's ports. As a result, FDB (forwarding database)
entries are created, and they can be offloaded to the HW. By adding
VF port representors to the bridge together with the uplink netdev,
we can learn the VF's and the link partner's MAC addresses. This is
achieved via the slow/exception path, where packets that do not match
any filters (FDB entries in this case) are sent to the bridge ports.

The driver keeps track of the netdevs added to the bridge by
listening for the NETDEV_CHANGEUPPER event. We distinguish two types
of bridge ports: the uplink port and the VF's representor port. The
Linux bridge always learns the src MAC of a packet on the rx path.
With the current slow-path implementation, this means that we learn
the VF's MAC on the port repr (when the VF transmits a packet) and
the link partner's MAC on the uplink (when we receive it on the
uplink from the LAN).

The driver is notified about learned MACs/VLANs via the
SWITCHDEV_FDB_{ADD|DEL}_TO_DEVICE events. This is followed by the
creation of a HW filter whose direction is based on the port type
(uplink or VF repr). In the case of the uplink, the rule forwards
packets to the LAN (matching on the link partner's MAC). When the
notification is received on a VF repr, the rule forwards packets to
the associated VF (matching on the VF's MAC).
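
Condensed from ice_eswitch_br_fwd_rule_create() in the new
ice_eswitch_br.c below, the direction selection boils down to:

    switch (port_type) {
    case ICE_ESWITCH_BR_UPLINK_PORT:
            /* learned on uplink: match link partner's MAC, to LAN */
            ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
            break;
    case ICE_ESWITCH_BR_VF_REPR_PORT:
            /* learned on VF repr: match VF's MAC, to the VF's VSI */
            ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
                                              vsi_idx);
            break;
    }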

However, this approach would not work on its own, because once one
direction is offloaded, the bridge can no longer learn the other one.
If the egress rule is added (learned on the uplink), the response from
the VF is sent directly to the LAN. The packet does not go through
the slow path, so it is never seen on the VF's port repr, and the
bridge therefore never learns the VF's MAC.

This is solved by introducing a guard rule, which prevents the
forward rule from taking effect until the opposite direction is
offloaded as well.
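
Condensed from ice_eswitch_br_flow_create() below: every offloaded
FDB entry is a pair of rules. The forward rule matches on the dst MAC
and carries need_pass_l2; the guard rule matches on the src MAC with
an ICE_NOP action and allow_pass_l2, so the forward rule only fires
once the packet's source address has been learned too:

    flow->fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx,
                                                    port_type, mac, vid);
    flow->guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx,
                                                        mac, vid);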

Aging is not fully supported yet; the aging time is static for now.
Follow-up submissions will introduce counters that will allow us to
track whether a rule is actually being used.
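
The current static scheme, condensed from ice_eswitch_br_update()
below (run from a delayed work every ICE_ESW_BRIDGE_UPDATE_INTERVAL,
i.e. once a second):

    list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
            if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
                    continue;
            if (time_is_after_eq_jiffies(entry->last_use +
                                         bridge->ageing_time))
                    continue;
            ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
    }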

A few fixes/changes are needed for this feature to work with the ice
driver; these are introduced in the first 5 patches.
Reviewed-by: Vlad Buslov <vladbu@nvidia.com>

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: add tracepoints for the switchdev bridge
  ice: implement static version of ageing
  ice: implement bridge port vlan
  ice: Add VLAN FDB support in switchdev mode
  ice: Add guard rule when creating FDB in switchdev
  ice: Switchdev FDB events support
  ice: Implement basic eswitch bridge setup
  ice: Unset src prune on uplink VSI
  ice: Disable vlan pruning for uplink VSI
  ice: Don't tx before switchdev is fully configured
  ice: Prohibit rx mode change in switchdev mode
  ice: Skip adv rules removal upon switchdev release
====================

Link: https://lore.kernel.org/r/20230724161152.2177196-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 20bf98c9 d129c2a2
...
@@ -47,5 +47,5 @@ ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
 ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
 ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
 ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
-ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
 ice-$(CONFIG_GNSS) += ice_gnss.o
...
@@ -370,6 +370,7 @@ struct ice_vsi {
 	u16 rx_buf_len;
 	struct ice_aqc_vsi_props info; /* VSI properties */
+	struct ice_vsi_vlan_info vlan_info;	/* vlan config to be restored */

 	/* VSI stats */
 	struct rtnl_link_stats64 net_stats;
...
@@ -517,6 +518,7 @@ enum ice_misc_thread_tasks {
 struct ice_switchdev_info {
 	struct ice_vsi *control_vsi;
 	struct ice_vsi *uplink_vsi;
+	struct ice_esw_br_offloads *br_offloads;
 	bool is_running;
 };
...
@@ -626,6 +628,7 @@ struct ice_pf {
 	struct ice_lag *lag; /* Link Aggregation information */

 	struct ice_switchdev_info switchdev;
+	struct ice_esw_br_port *br_port;

 #define ICE_INVALID_AGG_NODE_ID 0
 #define ICE_PF_AGG_NODE_ID_START 1
...
@@ -853,7 +856,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
 	return false;
 }

-bool netif_is_ice(struct net_device *dev);
+bool netif_is_ice(const struct net_device *dev);
 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
 int ice_vsi_open_ctrl(struct ice_vsi *vsi);
...
...
@@ -4,6 +4,7 @@
 #include "ice.h"
 #include "ice_lib.h"
 #include "ice_eswitch.h"
+#include "ice_eswitch_br.h"
 #include "ice_fltr.h"
 #include "ice_repr.h"
 #include "ice_devlink.h"
...
@@ -103,17 +104,28 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
 		rule_added = true;
 	}

+	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+	if (vlan_ops->dis_rx_filtering(uplink_vsi))
+		goto err_dis_rx;
+
 	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
 		goto err_override_uplink;

 	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
 		goto err_override_control;

+	if (ice_vsi_update_local_lb(uplink_vsi, true))
+		goto err_override_local_lb;
+
 	return 0;

+err_override_local_lb:
+	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
 err_override_control:
 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
 err_override_uplink:
+	vlan_ops->ena_rx_filtering(uplink_vsi);
+err_dis_rx:
 	if (rule_added)
 		ice_clear_dflt_vsi(uplink_vsi);
 err_def_rx:
...
@@ -306,6 +318,9 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
 	repr->src_vsi = vsi;
 	repr->dst->u.port_info.port_id = vsi->vsi_num;

+	if (repr->br_port)
+		repr->br_port->vsi = vsi;
+
 	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
 	if (ret) {
 		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
...
@@ -331,6 +346,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	np = netdev_priv(netdev);
 	vsi = np->vsi;

+	if (!vsi || !ice_is_switchdev_running(vsi->back))
+		return NETDEV_TX_BUSY;
+
 	if (ice_is_reset_in_progress(vsi->back->state) ||
 	    test_bit(ICE_VF_DIS, vsi->back->state))
 		return NETDEV_TX_BUSY;
...
@@ -378,9 +396,14 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
 {
 	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+	struct ice_vsi_vlan_ops *vlan_ops;
+
+	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

+	ice_vsi_update_local_lb(uplink_vsi, false);
 	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
 	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+	vlan_ops->ena_rx_filtering(uplink_vsi);
 	ice_clear_dflt_vsi(uplink_vsi);
 	ice_fltr_add_mac_and_broadcast(uplink_vsi,
 				       uplink_vsi->port_info->mac.perm_addr,
...
@@ -455,16 +478,24 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
  */
 static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
 {
-	struct ice_vsi *ctrl_vsi;
+	struct ice_vsi *ctrl_vsi, *uplink_vsi;
+
+	uplink_vsi = ice_get_main_vsi(pf);
+	if (!uplink_vsi)
+		return -ENODEV;
+
+	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
+		dev_err(ice_pf_to_dev(pf),
+			"Uplink port cannot be a bridge port\n");
+		return -EINVAL;
+	}

 	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
 	if (!pf->switchdev.control_vsi)
 		return -ENODEV;

 	ctrl_vsi = pf->switchdev.control_vsi;
-	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
-	if (!pf->switchdev.uplink_vsi)
-		goto err_vsi;
+	pf->switchdev.uplink_vsi = uplink_vsi;

 	if (ice_eswitch_setup_env(pf))
 		goto err_vsi;
...
@@ -480,10 +511,15 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
 	if (ice_vsi_open(ctrl_vsi))
 		goto err_setup_reprs;

+	if (ice_eswitch_br_offloads_init(pf))
+		goto err_br_offloads;
+
 	ice_eswitch_napi_enable(pf);

 	return 0;

+err_br_offloads:
+	ice_vsi_close(ctrl_vsi);
 err_setup_reprs:
 	ice_repr_rem_from_all_vfs(pf);
 err_repr_add:
...
@@ -502,8 +538,8 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

 	ice_eswitch_napi_disable(pf);
+	ice_eswitch_br_offloads_deinit(pf);
 	ice_eswitch_release_env(pf);
-	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
 	ice_eswitch_release_reprs(pf, ctrl_vsi);
 	ice_vsi_release(ctrl_vsi);
 	ice_repr_rem_from_all_vfs(pf);
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023, Intel Corporation. */
#include "ice.h"
#include "ice_eswitch_br.h"
#include "ice_repr.h"
#include "ice_switch.h"
#include "ice_vlan.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_trace.h"
#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
static const struct rhashtable_params ice_fdb_ht_params = {
.key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
.key_len = sizeof(struct ice_esw_br_fdb_data),
.head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
.automatic_shrinking = true,
};
static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
{
/* Accept only PF netdev and PRs */
return ice_is_port_repr_netdev(dev) || netif_is_ice(dev);
}
static struct ice_esw_br_port *
ice_eswitch_br_netdev_to_port(struct net_device *dev)
{
if (ice_is_port_repr_netdev(dev)) {
struct ice_repr *repr = ice_netdev_to_repr(dev);
return repr->br_port;
} else if (netif_is_ice(dev)) {
struct ice_pf *pf = ice_netdev_to_pf(dev);
return pf->br_port;
}
return NULL;
}
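/* Ingress rule (FDB entry learned on a VF representor): match on the
 * VF's MAC as destination and forward received packets to the VF's VSI.
 */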
static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
u8 pf_id, u16 vf_vsi_idx)
{
rule_info->sw_act.vsi_handle = vf_vsi_idx;
rule_info->sw_act.flag |= ICE_FLTR_RX;
rule_info->sw_act.src = pf_id;
rule_info->priority = 5;
}
static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
u16 pf_vsi_idx)
{
rule_info->sw_act.vsi_handle = pf_vsi_idx;
rule_info->sw_act.flag |= ICE_FLTR_TX;
rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
rule_info->flags_info.act_valid = true;
rule_info->priority = 5;
}
static int
ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
{
int err;
if (!rule)
return -EINVAL;
err = ice_rem_adv_rule_by_id(hw, rule);
kfree(rule);
return err;
}
static u16
ice_eswitch_br_get_lkups_cnt(u16 vid)
{
return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
}
static void
ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
{
if (ice_eswitch_br_is_vid_valid(vid)) {
list[1].type = ICE_VLAN_OFOS;
list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
}
}
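/* Create the HW forward rule for a learned FDB entry: match on dst MAC
 * (plus VLAN ID when vlan filtering is enabled) and forward to the
 * uplink (egress) or to the VF's VSI (ingress), based on the port type.
 * The rule carries need_pass_l2, so it only takes effect once the
 * matching guard rule for the packet's source address is in place.
 */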
static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
const unsigned char *mac, u16 vid)
{
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data *rule;
struct ice_adv_lkup_elem *list;
u16 lkups_cnt;
int err;
lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return ERR_PTR(-ENOMEM);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list) {
err = -ENOMEM;
goto err_list_alloc;
}
switch (port_type) {
case ICE_ESWITCH_BR_UPLINK_PORT:
ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
break;
case ICE_ESWITCH_BR_VF_REPR_PORT:
ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
vsi_idx);
break;
default:
err = -EINVAL;
goto err_add_rule;
}
list[0].type = ICE_MAC_OFOS;
ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
ice_eswitch_br_add_vlan_lkup(list, vid);
rule_info.need_pass_l2 = true;
rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
if (err)
goto err_add_rule;
kfree(list);
return rule;
err_add_rule:
kfree(list);
err_list_alloc:
kfree(rule);
return ERR_PTR(err);
}
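/* Create the guard rule: match on src MAC with an ICE_NOP action and
 * allow_pass_l2 set. Until it exists, the corresponding forward rule
 * (need_pass_l2) stays inactive, which keeps the reverse direction on
 * the slow path long enough for the bridge to learn it.
 */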
static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
const unsigned char *mac, u16 vid)
{
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data *rule;
struct ice_adv_lkup_elem *list;
int err = -ENOMEM;
u16 lkups_cnt;
lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
goto err_exit;
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
goto err_list_alloc;
list[0].type = ICE_MAC_OFOS;
ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
ice_eswitch_br_add_vlan_lkup(list, vid);
rule_info.allow_pass_l2 = true;
rule_info.sw_act.vsi_handle = vsi_idx;
rule_info.sw_act.fltr_act = ICE_NOP;
rule_info.priority = 5;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
if (err)
goto err_add_rule;
kfree(list);
return rule;
err_add_rule:
kfree(list);
err_list_alloc:
kfree(rule);
err_exit:
return ERR_PTR(err);
}
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
int port_type, const unsigned char *mac, u16 vid)
{
struct ice_rule_query_data *fwd_rule, *guard_rule;
struct ice_esw_br_flow *flow;
int err;
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
vid);
err = PTR_ERR_OR_ZERO(fwd_rule);
if (err) {
dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
err);
goto err_fwd_rule;
}
guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
err = PTR_ERR_OR_ZERO(guard_rule);
if (err) {
dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
err);
goto err_guard_rule;
}
flow->fwd_rule = fwd_rule;
flow->guard_rule = guard_rule;
return flow;
err_guard_rule:
ice_eswitch_br_rule_delete(hw, fwd_rule);
err_fwd_rule:
kfree(flow);
return ERR_PTR(err);
}
static struct ice_esw_br_fdb_entry *
ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
u16 vid)
{
struct ice_esw_br_fdb_data data = {
.vid = vid,
};
ether_addr_copy(data.addr, mac);
return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
ice_fdb_ht_params);
}
static void
ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
{
struct device *dev = ice_pf_to_dev(pf);
int err;
err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
if (err)
dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
err);
err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
if (err)
dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
err);
kfree(flow);
}
static struct ice_esw_br_vlan *
ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
struct ice_pf *pf = bridge->br_offloads->pf;
struct device *dev = ice_pf_to_dev(pf);
struct ice_esw_br_port *port;
struct ice_esw_br_vlan *vlan;
port = xa_load(&bridge->ports, vsi_idx);
if (!port) {
dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
return ERR_PTR(-EINVAL);
}
vlan = xa_load(&port->vlans, vid);
if (!vlan) {
dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
vsi_idx);
return ERR_PTR(-EINVAL);
}
return vlan;
}
static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
struct ice_esw_br_fdb_entry *fdb_entry)
{
struct ice_pf *pf = bridge->br_offloads->pf;
rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
ice_fdb_ht_params);
list_del(&fdb_entry->list);
ice_eswitch_br_flow_delete(pf, fdb_entry->flow);
kfree(fdb_entry);
}
static void
ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
const unsigned char *mac, u16 vid,
unsigned long val)
{
struct switchdev_notifier_fdb_info fdb_info = {
.addr = mac,
.vid = vid,
.offloaded = true,
};
call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
}
static void
ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
struct ice_esw_br_fdb_entry *entry)
{
if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
entry->data.vid,
SWITCHDEV_FDB_DEL_TO_BRIDGE);
ice_eswitch_br_fdb_entry_delete(bridge, entry);
}
static void
ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
const unsigned char *mac, u16 vid)
{
struct ice_pf *pf = bridge->br_offloads->pf;
struct ice_esw_br_fdb_entry *fdb_entry;
struct device *dev = ice_pf_to_dev(pf);
fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
if (!fdb_entry) {
dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
mac, vid);
return;
}
trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
}
static void
ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
struct ice_esw_br_port *br_port,
bool added_by_user,
const unsigned char *mac, u16 vid)
{
struct ice_esw_br *bridge = br_port->bridge;
struct ice_pf *pf = bridge->br_offloads->pf;
struct device *dev = ice_pf_to_dev(pf);
struct ice_esw_br_fdb_entry *fdb_entry;
struct ice_esw_br_flow *flow;
struct ice_esw_br_vlan *vlan;
struct ice_hw *hw = &pf->hw;
unsigned long event;
int err;
/* untagged filtering is not yet supported */
if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
return;
if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
vid);
if (IS_ERR(vlan)) {
dev_err(dev, "Failed to find vlan lookup, err: %ld\n",
PTR_ERR(vlan));
return;
}
}
fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
if (fdb_entry)
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
if (!fdb_entry) {
err = -ENOMEM;
goto err_exit;
}
flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
br_port->type, mac, vid);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto err_add_flow;
}
ether_addr_copy(fdb_entry->data.addr, mac);
fdb_entry->data.vid = vid;
fdb_entry->br_port = br_port;
fdb_entry->flow = flow;
fdb_entry->dev = netdev;
fdb_entry->last_use = jiffies;
event = SWITCHDEV_FDB_ADD_TO_BRIDGE;
if (added_by_user) {
fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
event = SWITCHDEV_FDB_OFFLOADED;
}
err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
ice_fdb_ht_params);
if (err)
goto err_fdb_insert;
list_add(&fdb_entry->list, &bridge->fdb_list);
trace_ice_eswitch_br_fdb_entry_create(fdb_entry);
ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);
return;
err_fdb_insert:
ice_eswitch_br_flow_delete(pf, flow);
err_add_flow:
kfree(fdb_entry);
err_exit:
dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
}
static void
ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
{
kfree(fdb_work->fdb_info.addr);
kfree(fdb_work);
}
static void
ice_eswitch_br_fdb_event_work(struct work_struct *work)
{
struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
bool added_by_user = fdb_work->fdb_info.added_by_user;
const unsigned char *mac = fdb_work->fdb_info.addr;
u16 vid = fdb_work->fdb_info.vid;
struct ice_esw_br_port *br_port;
rtnl_lock();
br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
if (!br_port)
goto err_exit;
switch (fdb_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
added_by_user, mac, vid);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
mac, vid);
break;
default:
goto err_exit;
}
err_exit:
rtnl_unlock();
dev_put(fdb_work->dev);
ice_eswitch_br_fdb_work_dealloc(fdb_work);
}
static struct ice_esw_br_fdb_work *
ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
struct net_device *dev,
unsigned long event)
{
struct ice_esw_br_fdb_work *work;
unsigned char *mac;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return ERR_PTR(-ENOMEM);
INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
if (!mac) {
kfree(work);
return ERR_PTR(-ENOMEM);
}
ether_addr_copy(mac, fdb_info->addr);
work->fdb_info.addr = mac;
work->event = event;
work->dev = dev;
return work;
}
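/* Atomic SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE notifier: validate the netdev
 * and defer the actual FDB programming to the ordered bridge workqueue.
 */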
static int
ice_eswitch_br_switchdev_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct switchdev_notifier_fdb_info *fdb_info;
struct switchdev_notifier_info *info = ptr;
struct ice_esw_br_offloads *br_offloads;
struct ice_esw_br_fdb_work *work;
struct netlink_ext_ack *extack;
struct net_device *upper;
br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
extack = switchdev_notifier_info_to_extack(ptr);
upper = netdev_master_upper_dev_get_rcu(dev);
if (!upper)
return NOTIFY_DONE;
if (!netif_is_bridge_master(upper))
return NOTIFY_DONE;
if (!ice_eswitch_br_is_dev_valid(dev))
return NOTIFY_DONE;
if (!ice_eswitch_br_netdev_to_port(dev))
return NOTIFY_DONE;
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb_info = container_of(info, typeof(*fdb_info), info);
work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
if (IS_ERR(work)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
return notifier_from_errno(PTR_ERR(work));
}
dev_hold(dev);
queue_work(br_offloads->wq, &work->work);
break;
default:
break;
}
return NOTIFY_DONE;
}
static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
{
struct ice_esw_br_fdb_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}
static void
ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
{
if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
return;
ice_eswitch_br_fdb_flush(bridge);
if (enable)
bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
else
bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
}
static void
ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
{
struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
vlan_ops->del_vlan(port->vsi, &port_vlan);
vlan_ops->clear_port_vlan(port->vsi);
ice_vf_vsi_disable_port_vlan(port->vsi);
port->pvid = 0;
}
static void
ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
struct ice_esw_br_vlan *vlan)
{
struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
struct ice_esw_br *bridge = port->bridge;
trace_ice_eswitch_br_vlan_cleanup(vlan);
list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
if (vlan->vid == fdb_entry->data.vid)
ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
}
xa_erase(&port->vlans, vlan->vid);
if (port->pvid == vlan->vid)
ice_eswitch_br_clear_pvid(port);
kfree(vlan);
}
static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
{
struct ice_esw_br_vlan *vlan;
unsigned long index;
xa_for_each(&port->vlans, index, vlan)
ice_eswitch_br_vlan_cleanup(port, vlan);
}
static int
ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
struct ice_esw_br_vlan *vlan)
{
struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
struct device *dev = ice_pf_to_dev(port->vsi->back);
struct ice_vsi_vlan_ops *vlan_ops;
int err;
if (port->pvid == vlan->vid || vlan->vid == 1)
return 0;
/* Setting port vlan on uplink isn't supported by hw */
if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
return -EOPNOTSUPP;
if (port->pvid) {
dev_info(dev,
"Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
port->vsi_idx, port->pvid);
return -EEXIST;
}
ice_vf_vsi_enable_port_vlan(port->vsi);
vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
if (err)
return err;
err = vlan_ops->add_vlan(port->vsi, &port_vlan);
if (err)
return err;
ice_eswitch_br_port_vlans_flush(port);
port->pvid = vlan->vid;
return 0;
}
static struct ice_esw_br_vlan *
ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
{
struct device *dev = ice_pf_to_dev(port->vsi->back);
struct ice_esw_br_vlan *vlan;
int err;
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return ERR_PTR(-ENOMEM);
vlan->vid = vid;
vlan->flags = flags;
if ((flags & BRIDGE_VLAN_INFO_PVID) &&
(flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
err = ice_eswitch_br_set_pvid(port, vlan);
if (err)
goto err_set_pvid;
} else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
(flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
err = -EOPNOTSUPP;
goto err_set_pvid;
}
err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
if (err)
goto err_insert;
trace_ice_eswitch_br_vlan_create(vlan);
return vlan;
err_insert:
if (port->pvid)
ice_eswitch_br_clear_pvid(port);
err_set_pvid:
kfree(vlan);
return ERR_PTR(err);
}
static int
ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
u16 flags, struct netlink_ext_ack *extack)
{
struct ice_esw_br_port *port;
struct ice_esw_br_vlan *vlan;
port = xa_load(&bridge->ports, vsi_idx);
if (!port)
return -EINVAL;
if (port->pvid) {
dev_info(ice_pf_to_dev(port->vsi->back),
"Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
port->vsi_idx, port->pvid);
return -EEXIST;
}
vlan = xa_load(&port->vlans, vid);
if (vlan) {
if (vlan->flags == flags)
return 0;
ice_eswitch_br_vlan_cleanup(port, vlan);
}
vlan = ice_eswitch_br_vlan_create(vid, flags, port);
if (IS_ERR(vlan)) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
vid, vsi_idx);
return PTR_ERR(vlan);
}
return 0;
}
static void
ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
{
struct ice_esw_br_port *port;
struct ice_esw_br_vlan *vlan;
port = xa_load(&bridge->ports, vsi_idx);
if (!port)
return;
vlan = xa_load(&port->vlans, vid);
if (!vlan)
return;
ice_eswitch_br_vlan_cleanup(port, vlan);
}
static int
ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
struct switchdev_obj_port_vlan *vlan;
int err;
if (!br_port)
return -EINVAL;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
err = ice_eswitch_br_port_vlan_add(br_port->bridge,
br_port->vsi_idx, vlan->vid,
vlan->flags, extack);
return err;
default:
return -EOPNOTSUPP;
}
}
static int
ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
const struct switchdev_obj *obj)
{
struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
struct switchdev_obj_port_vlan *vlan;
if (!br_port)
return -EINVAL;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
vlan->vid);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int
ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
if (!br_port)
return -EINVAL;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
ice_eswitch_br_vlan_filtering_set(br_port->bridge,
attr->u.vlan_filtering);
return 0;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
br_port->bridge->ageing_time =
clock_t_to_jiffies(attr->u.ageing_time);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int
ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add(dev, ptr,
ice_eswitch_br_is_dev_valid,
ice_eswitch_br_port_obj_add);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del(dev, ptr,
ice_eswitch_br_is_dev_valid,
ice_eswitch_br_port_obj_del);
break;
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
ice_eswitch_br_is_dev_valid,
ice_eswitch_br_port_obj_attr_set);
break;
default:
err = 0;
}
return notifier_from_errno(err);
}
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
struct ice_esw_br_port *br_port)
{
struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
struct ice_vsi *vsi = br_port->vsi;
list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
if (br_port == fdb_entry->br_port)
ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
}
if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
vsi->back->br_port = NULL;
else if (vsi->vf && vsi->vf->repr)
vsi->vf->repr->br_port = NULL;
xa_erase(&bridge->ports, br_port->vsi_idx);
ice_eswitch_br_port_vlans_flush(br_port);
kfree(br_port);
}
static struct ice_esw_br_port *
ice_eswitch_br_port_init(struct ice_esw_br *bridge)
{
struct ice_esw_br_port *br_port;
br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
if (!br_port)
return ERR_PTR(-ENOMEM);
xa_init(&br_port->vlans);
br_port->bridge = bridge;
return br_port;
}
static int
ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
struct ice_repr *repr)
{
struct ice_esw_br_port *br_port;
int err;
br_port = ice_eswitch_br_port_init(bridge);
if (IS_ERR(br_port))
return PTR_ERR(br_port);
br_port->vsi = repr->src_vsi;
br_port->vsi_idx = br_port->vsi->idx;
br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
repr->br_port = br_port;
err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
if (err) {
ice_eswitch_br_port_deinit(bridge, br_port);
return err;
}
return 0;
}
static int
ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
{
struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
struct ice_esw_br_port *br_port;
int err;
br_port = ice_eswitch_br_port_init(bridge);
if (IS_ERR(br_port))
return PTR_ERR(br_port);
br_port->vsi = vsi;
br_port->vsi_idx = br_port->vsi->idx;
br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
pf->br_port = br_port;
err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
if (err) {
ice_eswitch_br_port_deinit(bridge, br_port);
return err;
}
return 0;
}
static void
ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
{
struct ice_esw_br_port *port;
unsigned long i;
xa_for_each(&bridge->ports, i, port)
ice_eswitch_br_port_deinit(bridge, port);
}
static void
ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
struct ice_esw_br *bridge)
{
if (!bridge)
return;
/* Cleanup all the ports that were added asynchronously
* through NETDEV_CHANGEUPPER event.
*/
ice_eswitch_br_ports_flush(bridge);
WARN_ON(!xa_empty(&bridge->ports));
xa_destroy(&bridge->ports);
rhashtable_destroy(&bridge->fdb_ht);
br_offloads->bridge = NULL;
kfree(bridge);
}
static struct ice_esw_br *
ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
{
struct ice_esw_br *bridge;
int err;
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return ERR_PTR(-ENOMEM);
err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
if (err) {
kfree(bridge);
return ERR_PTR(err);
}
INIT_LIST_HEAD(&bridge->fdb_list);
bridge->br_offloads = br_offloads;
bridge->ifindex = ifindex;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
xa_init(&bridge->ports);
br_offloads->bridge = bridge;
return bridge;
}
static struct ice_esw_br *
ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
struct netlink_ext_ack *extack)
{
struct ice_esw_br *bridge = br_offloads->bridge;
if (bridge) {
if (bridge->ifindex != ifindex) {
NL_SET_ERR_MSG_MOD(extack,
"Only one bridge is supported per eswitch");
return ERR_PTR(-EOPNOTSUPP);
}
return bridge;
}
/* Create the bridge if it doesn't exist yet */
bridge = ice_eswitch_br_init(br_offloads, ifindex);
if (IS_ERR(bridge))
NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");
return bridge;
}
static void
ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
struct ice_esw_br *bridge)
{
/* Remove the bridge if it exists and there are no ports left */
if (!bridge || !xa_empty(&bridge->ports))
return;
ice_eswitch_br_deinit(br_offloads, bridge);
}
static int
ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
struct net_device *dev, int ifindex,
struct netlink_ext_ack *extack)
{
struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
struct ice_esw_br *bridge;
if (!br_port) {
NL_SET_ERR_MSG_MOD(extack,
"Port representor is not attached to any bridge");
return -EINVAL;
}
if (br_port->bridge->ifindex != ifindex) {
NL_SET_ERR_MSG_MOD(extack,
"Port representor is attached to another bridge");
return -EINVAL;
}
bridge = br_port->bridge;
trace_ice_eswitch_br_port_unlink(br_port);
ice_eswitch_br_port_deinit(br_port->bridge, br_port);
ice_eswitch_br_verify_deinit(br_offloads, bridge);
return 0;
}
static int
ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
struct net_device *dev, int ifindex,
struct netlink_ext_ack *extack)
{
struct ice_esw_br *bridge;
int err;
if (ice_eswitch_br_netdev_to_port(dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Port is already attached to the bridge");
return -EINVAL;
}
bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
if (ice_is_port_repr_netdev(dev)) {
struct ice_repr *repr = ice_netdev_to_repr(dev);
err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
trace_ice_eswitch_br_port_link(repr->br_port);
} else {
struct ice_pf *pf = ice_netdev_to_pf(dev);
err = ice_eswitch_br_uplink_port_init(bridge, pf);
trace_ice_eswitch_br_port_link(pf->br_port);
}
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
goto err_port_init;
}
return 0;
err_port_init:
ice_eswitch_br_verify_deinit(br_offloads, bridge);
return err;
}
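/* NETDEV_CHANGEUPPER handler: link the port to the bridge when it is
 * enslaved, unlink it when it is released.
 */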
static int
ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info = ptr;
struct ice_esw_br_offloads *br_offloads;
struct netlink_ext_ack *extack;
struct net_device *upper;
br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);
if (!ice_eswitch_br_is_dev_valid(dev))
return 0;
upper = info->upper_dev;
if (!netif_is_bridge_master(upper))
return 0;
extack = netdev_notifier_info_to_extack(&info->info);
if (info->linking)
return ice_eswitch_br_port_link(br_offloads, dev,
upper->ifindex, extack);
else
return ice_eswitch_br_port_unlink(br_offloads, dev,
upper->ifindex, extack);
}
static int
ice_eswitch_br_port_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
int err = 0;
switch (event) {
case NETDEV_CHANGEUPPER:
err = ice_eswitch_br_port_changeupper(nb, ptr);
break;
}
return notifier_from_errno(err);
}
static void
ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
ASSERT_RTNL();
if (!br_offloads)
return;
ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
pf->switchdev.br_offloads = NULL;
kfree(br_offloads);
}
static struct ice_esw_br_offloads *
ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads;
ASSERT_RTNL();
if (pf->switchdev.br_offloads)
return ERR_PTR(-EEXIST);
br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
pf->switchdev.br_offloads = br_offloads;
br_offloads->pf = pf;
return br_offloads;
}
void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads;
br_offloads = pf->switchdev.br_offloads;
if (!br_offloads)
return;
cancel_delayed_work_sync(&br_offloads->update_work);
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
unregister_switchdev_notifier(&br_offloads->switchdev_nb);
destroy_workqueue(br_offloads->wq);
/* Although the notifier blocks were just unregistered, so no
 * new events will arrive, some events might still be in progress.
 * Hold the rtnl lock and wait for them to finish.
 */
rtnl_lock();
ice_eswitch_br_offloads_dealloc(pf);
rtnl_unlock();
}
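/* Static ageing sweep: expire FDB entries that have not been used within
 * the bridge ageing time; entries added by the user never age out.
 */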
static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
{
struct ice_esw_br *bridge = br_offloads->bridge;
struct ice_esw_br_fdb_entry *entry, *tmp;
if (!bridge)
return;
rtnl_lock();
list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
continue;
if (time_is_after_eq_jiffies(entry->last_use +
bridge->ageing_time))
continue;
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
}
rtnl_unlock();
}
static void ice_eswitch_br_update_work(struct work_struct *work)
{
struct ice_esw_br_offloads *br_offloads;
br_offloads = ice_work_to_br_offloads(work);
ice_eswitch_br_update(br_offloads);
queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
ICE_ESW_BRIDGE_UPDATE_INTERVAL);
}
int
ice_eswitch_br_offloads_init(struct ice_pf *pf)
{
struct ice_esw_br_offloads *br_offloads;
struct device *dev = ice_pf_to_dev(pf);
int err;
rtnl_lock();
br_offloads = ice_eswitch_br_offloads_alloc(pf);
rtnl_unlock();
if (IS_ERR(br_offloads)) {
dev_err(dev, "Failed to init eswitch bridge\n");
return PTR_ERR(br_offloads);
}
br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
if (!br_offloads->wq) {
err = -ENOMEM;
dev_err(dev, "Failed to allocate bridge workqueue\n");
goto err_alloc_wq;
}
br_offloads->switchdev_nb.notifier_call =
ice_eswitch_br_switchdev_event;
err = register_switchdev_notifier(&br_offloads->switchdev_nb);
if (err) {
dev_err(dev,
"Failed to register switchdev notifier\n");
goto err_reg_switchdev_nb;
}
br_offloads->switchdev_blk.notifier_call =
ice_eswitch_br_event_blocking;
err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
if (err) {
dev_err(dev,
"Failed to register bridge blocking switchdev notifier\n");
goto err_reg_switchdev_blk;
}
br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
err = register_netdevice_notifier(&br_offloads->netdev_nb);
if (err) {
dev_err(dev,
"Failed to register bridge port event notifier\n");
goto err_reg_netdev_nb;
}
INIT_DELAYED_WORK(&br_offloads->update_work,
ice_eswitch_br_update_work);
queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
ICE_ESW_BRIDGE_UPDATE_INTERVAL);
return 0;
err_reg_netdev_nb:
unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
err_reg_switchdev_blk:
unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
rtnl_lock();
ice_eswitch_br_offloads_dealloc(pf);
rtnl_unlock();
return err;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2023, Intel Corporation. */
#ifndef _ICE_ESWITCH_BR_H_
#define _ICE_ESWITCH_BR_H_
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
struct ice_esw_br_fdb_data {
unsigned char addr[ETH_ALEN];
u16 vid;
};
struct ice_esw_br_flow {
struct ice_rule_query_data *fwd_rule;
struct ice_rule_query_data *guard_rule;
};
enum {
ICE_ESWITCH_BR_FDB_ADDED_BY_USER = BIT(0),
};
struct ice_esw_br_fdb_entry {
struct ice_esw_br_fdb_data data;
struct rhash_head ht_node;
struct list_head list;
int flags;
struct net_device *dev;
struct ice_esw_br_port *br_port;
struct ice_esw_br_flow *flow;
unsigned long last_use;
};
enum ice_esw_br_port_type {
ICE_ESWITCH_BR_UPLINK_PORT = 0,
ICE_ESWITCH_BR_VF_REPR_PORT = 1,
};
struct ice_esw_br_port {
struct ice_esw_br *bridge;
struct ice_vsi *vsi;
enum ice_esw_br_port_type type;
u16 vsi_idx;
u16 pvid;
struct xarray vlans;
};
enum {
ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0),
};
struct ice_esw_br {
struct ice_esw_br_offloads *br_offloads;
struct xarray ports;
struct rhashtable fdb_ht;
struct list_head fdb_list;
int ifindex;
u32 flags;
unsigned long ageing_time;
};
struct ice_esw_br_offloads {
struct ice_pf *pf;
struct ice_esw_br *bridge;
struct notifier_block netdev_nb;
struct notifier_block switchdev_blk;
struct notifier_block switchdev_nb;
struct workqueue_struct *wq;
struct delayed_work update_work;
};
struct ice_esw_br_fdb_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
unsigned long event;
};
struct ice_esw_br_vlan {
u16 vid;
u16 flags;
};
#define ice_nb_to_br_offloads(nb, nb_name) \
container_of(nb, \
struct ice_esw_br_offloads, \
nb_name)
#define ice_work_to_br_offloads(w) \
container_of(w, \
struct ice_esw_br_offloads, \
update_work.work)
#define ice_work_to_fdb_work(w) \
container_of(w, \
struct ice_esw_br_fdb_work, \
work)
static inline bool ice_eswitch_br_is_vid_valid(u16 vid)
{
/* In trunk VLAN mode, for untagged traffic the bridge sends requests
* to offload VLAN 1 with pvid and untagged flags set. Since these
* flags are not supported, add a MAC filter instead.
*/
return vid > 1;
}
void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
int
ice_eswitch_br_offloads_init(struct ice_pf *pf);
#endif /* _ICE_ESWITCH_BR_H_ */
...
@@ -4076,3 +4076,28 @@ void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
 {
 	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
 }
+
+/**
+ * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
+ * @vsi: pointer to VSI structure
+ * @set: set or unset the bit
+ */
+int
+ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
+{
+	struct ice_vsi_ctx ctx = {
+		.info = vsi->info,
+	};
+
+	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+	if (set)
+		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+	else
+		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+
+	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+		return -ENODEV;
+
+	vsi->info = ctx.info;
+	return 0;
+}
...
@@ -157,6 +157,7 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
 void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
 void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
...
...
@@ -80,7 +80,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
 		     void *data,
 		     void (*cleanup)(struct flow_block_cb *block_cb));

-bool netif_is_ice(struct net_device *dev)
+bool netif_is_ice(const struct net_device *dev)
 {
 	return dev && (dev->netdev_ops == &ice_netdev_ops);
 }
@@ -5704,7 +5704,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;

-	if (!vsi)
+	if (!vsi || ice_is_switchdev_running(vsi->back))
 		return;

 	/* Set the flags to synchronize filters
...
...
@@ -254,7 +254,7 @@ static const struct net_device_ops ice_repr_netdev_ops = {
  * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
  * @netdev: pointer to netdev
  */
-bool ice_is_port_repr_netdev(struct net_device *netdev)
+bool ice_is_port_repr_netdev(const struct net_device *netdev)
 {
 	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
 }
...
...
@@ -12,6 +12,7 @@ struct ice_repr {
 	struct ice_q_vector *q_vector;
 	struct net_device *netdev;
 	struct metadata_dst *dst;
+	struct ice_esw_br_port *br_port;
 #ifdef CONFIG_ICE_SWITCHDEV
 	/* info about slow path rule */
 	struct ice_rule_query_data sp_rule;
@@ -27,5 +28,5 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
 void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);

 struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
-bool ice_is_port_repr_netdev(struct net_device *netdev);
+bool ice_is_port_repr_netdev(const struct net_device *netdev);
 #endif
...@@ -2272,6 +2272,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, ...@@ -2272,6 +2272,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Propagate some data to the recipe database */ /* Propagate some data to the recipe database */
recps[idx].is_root = !!is_root; recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
recps[idx].chain_idx = root_bufs.content.result_indx & recps[idx].chain_idx = root_bufs.content.result_indx &
...@@ -4613,13 +4617,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { ...@@ -4613,13 +4617,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe * ice_find_recp - find a recipe
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match * @lkup_exts: extension sequence to match
* @tun_type: type of recipe tunnel * @rinfo: information regarding the rule e.g. priority and action info
* *
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/ */
static u16 static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
enum ice_sw_tunnel_type tun_type) const struct ice_adv_rule_info *rinfo)
{ {
bool refresh_required = true; bool refresh_required = true;
struct ice_sw_recipe *recp; struct ice_sw_recipe *recp;
...@@ -4680,9 +4684,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, ...@@ -4680,9 +4684,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
} }
/* If for "i"th recipe the found was never set to false /* If for "i"th recipe the found was never set to false
* then it means we found our match * then it means we found our match
* Also tun type of recipe needs to be checked * Also tun type and *_pass_l2 of recipe needs to be
* checked
*/ */
if (found && recp[i].tun_type == tun_type) if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
return i; /* Return the recipe ID */ return i; /* Return the recipe ID */
} }
} }
...@@ -4952,6 +4959,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ...@@ -4952,6 +4959,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles) unsigned long *profiles)
{ {
DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
struct ice_aqc_recipe_content *content;
struct ice_aqc_recipe_data_elem *tmp; struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf; struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry; struct ice_recp_grp_entry *entry;
...@@ -5012,6 +5020,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ...@@ -5012,6 +5020,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status) if (status)
goto err_unroll; goto err_unroll;
content = &buf[recps].content;
/* Clear the result index of the located recipe, as this will be /* Clear the result index of the located recipe, as this will be
* updated, if needed, later in the recipe creation process. * updated, if needed, later in the recipe creation process.
*/ */
...@@ -5022,26 +5032,24 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ...@@ -5022,26 +5032,24 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
/* if the recipe is a non-root recipe RID should be programmed /* if the recipe is a non-root recipe RID should be programmed
* as 0 for the rules to be applied correctly. * as 0 for the rules to be applied correctly.
*/ */
buf[recps].content.rid = 0; content->rid = 0;
memset(&buf[recps].content.lkup_indx, 0, memset(&content->lkup_indx, 0,
sizeof(buf[recps].content.lkup_indx)); sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */ /* All recipes use look-up index 0 to match switch ID. */
buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
buf[recps].content.mask[0] = content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
* to be 0 * to be 0
*/ */
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
buf[recps].content.lkup_indx[i] = 0x80; content->lkup_indx[i] = 0x80;
buf[recps].content.mask[i] = 0; content->mask[i] = 0;
} }
for (i = 0; i < entry->r_group.n_val_pairs; i++) { for (i = 0; i < entry->r_group.n_val_pairs; i++) {
buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i]; content->lkup_indx[i + 1] = entry->fv_idx[i];
buf[recps].content.mask[i + 1] = content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
cpu_to_le16(entry->fv_mask[i]);
} }
if (rm->n_grp_count > 1) { if (rm->n_grp_count > 1) {
...@@ -5055,7 +5063,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ...@@ -5055,7 +5063,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
} }
entry->chain_idx = chain_idx; entry->chain_idx = chain_idx;
buf[recps].content.result_indx = content->result_indx =
ICE_AQ_RECIPE_RESULT_EN | ICE_AQ_RECIPE_RESULT_EN |
((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) & ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
ICE_AQ_RECIPE_RESULT_DATA_M); ICE_AQ_RECIPE_RESULT_DATA_M);
...@@ -5069,7 +5077,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ...@@ -5069,7 +5077,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
ICE_MAX_NUM_RECIPES); ICE_MAX_NUM_RECIPES);
set_bit(buf[recps].recipe_indx, set_bit(buf[recps].recipe_indx,
(unsigned long *)buf[recps].recipe_bitmap); (unsigned long *)buf[recps].recipe_bitmap);
buf[recps].content.act_ctrl_fwd_priority = rm->priority; content->act_ctrl_fwd_priority = rm->priority;
if (rm->need_pass_l2)
content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
if (rm->allow_pass_l2)
content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
recps++; recps++;
} }
...@@ -5107,9 +5121,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, ...@@ -5107,9 +5121,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status) if (status)
goto err_unroll; goto err_unroll;
content = &buf[recps].content;
buf[recps].recipe_indx = (u8)rid; buf[recps].recipe_indx = (u8)rid;
buf[recps].content.rid = (u8)rid; content->rid = (u8)rid;
buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT; content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
/* the new entry created should also be part of rg_list to /* the new entry created should also be part of rg_list to
* make sure we have complete recipe * make sure we have complete recipe
*/ */
@@ -5121,16 +5137,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
-		memset(&buf[recps].content.lkup_indx, 0,
-		       sizeof(buf[recps].content.lkup_indx));
+		memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
		/* All recipes use look-up index 0 to match switch ID. */
-		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
-		buf[recps].content.mask[0] =
-			cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+		content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+		content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
-			buf[recps].content.lkup_indx[i] =
-				ICE_AQ_RECIPE_LKUP_IGNORE;
-			buf[recps].content.mask[i] = 0;
+			content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+			content->mask[i] = 0;
		}

		i = 1;
@@ -5142,8 +5155,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		list_for_each_entry(entry, &rm->rg_list, l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
-			buf[recps].content.lkup_indx[i] = entry->chain_idx;
-			buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+			content->lkup_indx[i] = entry->chain_idx;
+			content->mask[i++] = cpu_to_le16(0xFFFF);
			set_bit(entry->rid, rm->r_bitmap);
		}
		list_add(&last_chain_entry->l_entry, &rm->rg_list);
@@ -5155,7 +5168,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
			status = -EINVAL;
			goto err_unroll;
		}
-		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+		content->act_ctrl_fwd_priority = rm->priority;
		recps++;
		rm->root_rid = (u8)rid;
@@ -5220,6 +5233,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
		recp->n_grp_count = rm->n_grp_count;
		recp->tun_type = rm->tun_type;
+		recp->need_pass_l2 = rm->need_pass_l2;
+		recp->allow_pass_l2 = rm->allow_pass_l2;
		recp->recp_created = true;
	}
	rm->root_buf = buf;
@@ -5388,6 +5403,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
	/* set the recipe priority if specified */
	rm->priority = (u8)rinfo->priority;

+	rm->need_pass_l2 = rinfo->need_pass_l2;
+	rm->allow_pass_l2 = rinfo->allow_pass_l2;
+
	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
@@ -5403,7 +5421,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
	}

	/* Look for a recipe which matches our requested fv / mask list */
-	*rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
+	*rid = ice_find_recp(hw, lkup_exts, rinfo);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if we found a recipe that matches the existing criteria */
		goto err_unroll;
@@ -5839,7 +5857,9 @@ static bool ice_rules_equal(const struct ice_adv_rule_info *first,
	return first->sw_act.flag == second->sw_act.flag &&
	       first->tun_type == second->tun_type &&
	       first->vlan_type == second->vlan_type &&
-	       first->src_vsi == second->src_vsi;
+	       first->src_vsi == second->src_vsi &&
+	       first->need_pass_l2 == second->need_pass_l2 &&
+	       first->allow_pass_l2 == second->allow_pass_l2;
}

/**
@@ -6078,7 +6098,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
-	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+	      rinfo->sw_act.fltr_act == ICE_NOP)) {
		status = -EIO;
		goto free_pkt_profile;
	}
@@ -6089,7 +6110,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		goto free_pkt_profile;
	}

-	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+	    rinfo->sw_act.fltr_act == ICE_NOP)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);
@@ -6159,6 +6181,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
		       ICE_SINGLE_ACT_VALID_BIT;
		break;
+	case ICE_NOP:
+		act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+				  rinfo->sw_act.fwd_id.hw_vsi_id);
+		act &= ~ICE_SINGLE_ACT_VALID_BIT;
+		break;
	default:
		status = -EIO;
		goto err_ice_add_adv_rule;
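For orientation, a hedged sketch of how a caller could combine the new ICE_NOP action with the pass-L2 recipe flags added above. The lookup array, its length, and vsi_handle are assumptions for illustration; only ice_add_adv_rule(), struct ice_adv_rule_info, ICE_NOP, and the need_pass_l2 bit come from this diff.

/* Illustrative guard-style rule: it matches like a forward rule, but
 * with ICE_NOP the VSI id is encoded while ICE_SINGLE_ACT_VALID_BIT
 * stays cleared, so the hardware entry matches without forwarding.
 */
struct ice_rule_query_data added_entry = {}; /* filled in on success */
struct ice_adv_rule_info rinfo = {};
int status;

rinfo.sw_act.fltr_act = ICE_NOP;      /* match-only action */
rinfo.sw_act.vsi_handle = vsi_handle; /* assumed; resolved to hw_vsi_id */
rinfo.need_pass_l2 = 1;               /* recipe gains ICE_AQ_RECIPE_ACT_NEED_PASS_L2 */

status = ice_add_adv_rule(hw, lkups, lkups_cnt, &rinfo, &added_entry);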
@@ -6439,7 +6466,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		return -EIO;
	}

-	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
+	rid = ice_find_recp(hw, &lkup_exts, rinfo);
	/* If we did not find a recipe that matches the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return -EINVAL;
@@ -6532,59 +6559,6 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
	return -ENOENT;
}
-/**
- * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
- * given VSI handle
- * @hw: pointer to the hardware structure
- * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
- *
- * This function is used to remove all the rules for a given VSI and as soon
- * as removing a rule fails, it will return immediately with the error code,
- * else it will return success.
- */
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
-	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
-	struct ice_vsi_list_map_info *map_info;
-	struct ice_adv_rule_info rinfo;
-	struct list_head *list_head;
-	struct ice_switch_info *sw;
-	int status;
-	u8 rid;
-
-	sw = hw->switch_info;
-	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
-		if (!sw->recp_list[rid].recp_created)
-			continue;
-		if (!sw->recp_list[rid].adv_rule)
-			continue;
-
-		list_head = &sw->recp_list[rid].filt_rules;
-		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
-					 list_entry) {
-			rinfo = list_itr->rule_info;
-			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
-				map_info = list_itr->vsi_list_info;
-				if (!map_info)
-					continue;
-
-				if (!test_bit(vsi_handle, map_info->vsi_map))
-					continue;
-			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {
-				continue;
-			}
-
-			rinfo.sw_act.vsi_handle = vsi_handle;
-			status = ice_rem_adv_rule(hw, list_itr->lkups,
-						  list_itr->lkups_cnt, &rinfo);
-			if (status)
-				return status;
-		}
-	}
-	return 0;
-}
-
/**
 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
 * @hw: pointer to the hardware structure
...
@@ -191,6 +191,8 @@ struct ice_adv_rule_info {
	u16 vlan_type;
	u16 fltr_rule_id;
	u32 priority;
+	u16 need_pass_l2:1;
+	u16 allow_pass_l2:1;
	u16 src_vsi;
	struct ice_sw_act_ctrl sw_act;
	struct ice_adv_rule_flags_info flags_info;
@@ -254,6 +256,9 @@ struct ice_sw_recipe {
	 */
	u8 priority;

+	u8 need_pass_l2:1;
+	u8 allow_pass_l2:1;
+
	struct list_head rg_list;

	/* AQ buffer associated with this recipe */
@@ -379,7 +384,6 @@ int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc);
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry);
...
@@ -21,6 +21,7 @@
#define _ICE_TRACE_H_

#include <linux/tracepoint.h>
+#include "ice_eswitch_br.h"

/* ice_trace() macro enables shared code to refer to trace points
 * like:
@@ -240,6 +241,95 @@ DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+DECLARE_EVENT_CLASS(ice_esw_br_fdb_template,
+		    TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+		    TP_ARGS(fdb),
+		    TP_STRUCT__entry(__array(char, dev_name, IFNAMSIZ)
+				     __array(unsigned char, addr, ETH_ALEN)
+				     __field(u16, vid)
+				     __field(int, flags)),
+		    TP_fast_assign(strscpy(__entry->dev_name,
+					   netdev_name(fdb->dev),
+					   IFNAMSIZ);
+				   memcpy(__entry->addr, fdb->data.addr, ETH_ALEN);
+				   __entry->vid = fdb->data.vid;
+				   __entry->flags = fdb->flags;),
+		    TP_printk("net_device=%s addr=%pM vid=%u flags=%x",
+			      __entry->dev_name,
+			      __entry->addr,
+			      __entry->vid,
+			      __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+	     ice_eswitch_br_fdb_entry_create,
+	     TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+	     TP_ARGS(fdb)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+	     ice_eswitch_br_fdb_entry_find_and_delete,
+	     TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+	     TP_ARGS(fdb)
+);
+
+DECLARE_EVENT_CLASS(ice_esw_br_vlan_template,
+		    TP_PROTO(struct ice_esw_br_vlan *vlan),
+		    TP_ARGS(vlan),
+		    TP_STRUCT__entry(__field(u16, vid)
+				     __field(u16, flags)),
+		    TP_fast_assign(__entry->vid = vlan->vid;
+				   __entry->flags = vlan->flags;),
+		    TP_printk("vid=%u flags=%x",
+			      __entry->vid,
+			      __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+	     ice_eswitch_br_vlan_create,
+	     TP_PROTO(struct ice_esw_br_vlan *vlan),
+	     TP_ARGS(vlan)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+	     ice_eswitch_br_vlan_cleanup,
+	     TP_PROTO(struct ice_esw_br_vlan *vlan),
+	     TP_ARGS(vlan)
+);
+
+#define ICE_ESW_BR_PORT_NAME_L 16
+DECLARE_EVENT_CLASS(ice_esw_br_port_template,
+		    TP_PROTO(struct ice_esw_br_port *port),
+		    TP_ARGS(port),
+		    TP_STRUCT__entry(__field(u16, vport_num)
+				     __array(char, port_type, ICE_ESW_BR_PORT_NAME_L)),
+		    TP_fast_assign(__entry->vport_num = port->vsi_idx;
+				   if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+					   strscpy(__entry->port_type,
+						   "Uplink",
+						   ICE_ESW_BR_PORT_NAME_L);
+				   else
+					   strscpy(__entry->port_type,
+						   "VF Representor",
+						   ICE_ESW_BR_PORT_NAME_L);),
+		    TP_printk("vport_num=%u port type=%s",
+			      __entry->vport_num,
+			      __entry->port_type)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+	     ice_eswitch_br_port_link,
+	     TP_PROTO(struct ice_esw_br_port *port),
+	     TP_ARGS(port)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+	     ice_eswitch_br_port_unlink,
+	     TP_PROTO(struct ice_esw_br_port *port),
+	     TP_ARGS(port)
+);
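A hedged usage sketch: in this driver, tracepoints are normally emitted through the ice_trace() wrapper described at the top of this header. The call site and the fdb variable below are assumptions for illustration, not part of this diff.

/* Illustrative call site: emit the event for an entry 'fdb' that the
 * eswitch bridge code has just created ('fdb' is assumed valid here).
 */
ice_trace(ice_eswitch_br_fdb_entry_create, fdb);

Once the event is enabled (for example via tracefs), each hit is rendered with the TP_printk() format shown above (net_device, MAC address, VID, flags).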
/* End tracepoints */

#endif /* _ICE_TRACE_H_ */
...
@@ -1033,6 +1033,7 @@ enum ice_sw_fwd_act_type {
	ICE_FWD_TO_Q,
	ICE_FWD_TO_QGRP,
	ICE_DROP_PACKET,
+	ICE_NOP,
	ICE_INVAL_ACT
};
...
@@ -21,6 +21,99 @@ noop_vlan(struct ice_vsi __always_unused *vsi)
	return 0;
}
+static void ice_port_vlan_on(struct ice_vsi *vsi)
+{
+	struct ice_vsi_vlan_ops *vlan_ops;
+	struct ice_pf *pf = vsi->back;
+
+	if (ice_is_dvm_ena(&pf->hw)) {
+		vlan_ops = &vsi->outer_vlan_ops;
+
+		/* setup outer VLAN ops */
+		vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+		vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+
+		/* setup inner VLAN ops */
+		vlan_ops = &vsi->inner_vlan_ops;
+		vlan_ops->add_vlan = noop_vlan_arg;
+		vlan_ops->del_vlan = noop_vlan_arg;
+		vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+		vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+		vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+		vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+	} else {
+		vlan_ops = &vsi->inner_vlan_ops;
+		vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+		vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
+	}
+	vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+}
+
+static void ice_port_vlan_off(struct ice_vsi *vsi)
+{
+	struct ice_vsi_vlan_ops *vlan_ops;
+	struct ice_pf *pf = vsi->back;
+
+	/* setup inner VLAN ops */
+	vlan_ops = &vsi->inner_vlan_ops;
+
+	vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+	vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+	vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+	vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+	if (ice_is_dvm_ena(&pf->hw)) {
+		vlan_ops = &vsi->outer_vlan_ops;
+
+		vlan_ops->del_vlan = ice_vsi_del_vlan;
+		vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+		vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+		vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+		vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+	} else {
+		vlan_ops->del_vlan = ice_vsi_del_vlan;
+	}
+
+	if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+		vlan_ops->ena_rx_filtering = noop_vlan;
+	else
+		vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+}
+
+/**
+ * ice_vf_vsi_enable_port_vlan - Set VSI VLAN ops to support port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * The function does not create the port VLAN itself; it only installs
+ * the VLAN ops that allow a port VLAN to be created on the VF VSI.
+ */
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi)
+{
+	if (WARN_ON_ONCE(!vsi->vf))
+		return;
+
+	ice_port_vlan_on(vsi);
+}
+
+/**
+ * ice_vf_vsi_disable_port_vlan - Clear VSI support for creating port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * The function should be called after the port VLAN has been removed
+ * from the VSI (using the VLAN ops).
+ */
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi)
+{
+	if (WARN_ON_ONCE(!vsi->vf))
+		return;
+
+	ice_port_vlan_off(vsi);
+}
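As a hedged sketch of the intended calling pattern: only the two helpers above and the set_port_vlan/clear_port_vlan ops come from this diff; the enclosing caller, the chosen ops table, and the VLAN values are assumptions for illustration.

/* Hypothetical VF configuration path: install port-VLAN ops, program
 * the VLAN, later tear it down and restore the regular ops.
 */
struct ice_vsi_vlan_ops *vlan_ops = &vsi->inner_vlan_ops; /* assumed SVM */
struct ice_vlan vlan = { .vid = 100, .prio = 0 };         /* illustrative */
int err;

ice_vf_vsi_enable_port_vlan(vsi);
err = vlan_ops->set_port_vlan(vsi, &vlan);

/* later, when the port VLAN is deleted */
err = vlan_ops->clear_port_vlan(vsi);
ice_vf_vsi_disable_port_vlan(vsi);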
/**
 * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
 * @vsi: VF's VSI being configured
@@ -39,91 +132,18 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
	if (WARN_ON(!vf))
		return;

-	if (ice_is_dvm_ena(&pf->hw)) {
-		vlan_ops = &vsi->outer_vlan_ops;
-
-		/* outer VLAN ops regardless of port VLAN config */
-		vlan_ops->add_vlan = ice_vsi_add_vlan;
-		vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
-		vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
-		if (ice_vf_is_port_vlan_ena(vf)) {
-			/* setup outer VLAN ops */
-			vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
-			/* all Rx traffic should be in the domain of the
-			 * assigned port VLAN, so prevent disabling Rx VLAN
-			 * filtering
-			 */
-			vlan_ops->dis_rx_filtering = noop_vlan;
-			vlan_ops->ena_rx_filtering =
-				ice_vsi_ena_rx_vlan_filtering;
-
-			/* setup inner VLAN ops */
-			vlan_ops = &vsi->inner_vlan_ops;
-			vlan_ops->add_vlan = noop_vlan_arg;
-			vlan_ops->del_vlan = noop_vlan_arg;
-			vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
-			vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
-			vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
-			vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
-		} else {
-			vlan_ops->dis_rx_filtering =
-				ice_vsi_dis_rx_vlan_filtering;
-
-			if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
-				vlan_ops->ena_rx_filtering = noop_vlan;
-			else
-				vlan_ops->ena_rx_filtering =
-					ice_vsi_ena_rx_vlan_filtering;
-
-			vlan_ops->del_vlan = ice_vsi_del_vlan;
-			vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
-			vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
-			vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
-			vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
-
-			/* setup inner VLAN ops */
-			vlan_ops = &vsi->inner_vlan_ops;
-			vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
-			vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
-			vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
-			vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
-		}
-	} else {
-		vlan_ops = &vsi->inner_vlan_ops;
-
-		/* inner VLAN ops regardless of port VLAN config */
-		vlan_ops->add_vlan = ice_vsi_add_vlan;
-		vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
-		vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
-		vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
-		if (ice_vf_is_port_vlan_ena(vf)) {
-			vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
-			vlan_ops->ena_rx_filtering =
-				ice_vsi_ena_rx_vlan_filtering;
-			/* all Rx traffic should be in the domain of the
-			 * assigned port VLAN, so prevent disabling Rx VLAN
-			 * filtering
-			 */
-			vlan_ops->dis_rx_filtering = noop_vlan;
-		} else {
-			vlan_ops->dis_rx_filtering =
-				ice_vsi_dis_rx_vlan_filtering;
-
-			if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
-				vlan_ops->ena_rx_filtering = noop_vlan;
-			else
-				vlan_ops->ena_rx_filtering =
-					ice_vsi_ena_rx_vlan_filtering;
-
-			vlan_ops->del_vlan = ice_vsi_del_vlan;
-			vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
-			vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
-			vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
-			vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
-		}
-	}
+	if (ice_vf_is_port_vlan_ena(vf))
+		ice_port_vlan_on(vsi);
+	else
+		ice_port_vlan_off(vsi);
+
+	vlan_ops = ice_is_dvm_ena(&pf->hw) ?
+		&vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
+
+	vlan_ops->add_vlan = ice_vsi_add_vlan;
+	vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+	vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+	vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
/**
...
@@ -13,7 +13,11 @@ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
#ifdef CONFIG_PCI_IOV
void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi);
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi);
#else
static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) { }
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
@@ -202,6 +202,24 @@ int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
	return ice_vsi_manage_vlan_insertion(vsi);
}
+static void
+ice_save_vlan_info(struct ice_aqc_vsi_props *info,
+		   struct ice_vsi_vlan_info *vlan)
+{
+	vlan->sw_flags2 = info->sw_flags2;
+	vlan->inner_vlan_flags = info->inner_vlan_flags;
+	vlan->outer_vlan_flags = info->outer_vlan_flags;
+}
+
+static void
+ice_restore_vlan_info(struct ice_aqc_vsi_props *info,
+		      struct ice_vsi_vlan_info *vlan)
+{
+	info->sw_flags2 = vlan->sw_flags2;
+	info->inner_vlan_flags = vlan->inner_vlan_flags;
+	info->outer_vlan_flags = vlan->outer_vlan_flags;
+}
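A hedged round-trip sketch of the two helpers above; the local variable and call order are illustrative only, since in this diff the driver saves into vsi->vlan_info rather than a local.

/* Illustrative symmetry check: whatever ice_save_vlan_info() captures,
 * ice_restore_vlan_info() writes back verbatim.
 */
struct ice_vsi_vlan_info saved;

ice_save_vlan_info(&vsi->info, &saved);    /* before programming a port VLAN */
/* port VLAN setup then rewrites sw_flags2 and the VLAN flag fields */
ice_restore_vlan_info(&vsi->info, &saved); /* when the port VLAN is cleared */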
/**
 * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
 * @vsi: the VSI to update
@@ -218,6 +236,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
	if (!ctxt)
		return -ENOMEM;

+	ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
	ctxt->info = vsi->info;
	info = &ctxt->info;
	info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
@@ -259,6 +278,33 @@ int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
	return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
}
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
+{
+	struct ice_hw *hw = &vsi->back->hw;
+	struct ice_aqc_vsi_props *info;
+	struct ice_vsi_ctx *ctxt;
+	int ret;
+
+	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+	if (!ctxt)
+		return -ENOMEM;
+
+	ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+	vsi->info.port_based_inner_vlan = 0;
+	ctxt->info = vsi->info;
+	info = &ctxt->info;
+	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+					   ICE_AQ_VSI_PROP_SW_VALID);
+
+	ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+	if (ret)
+		dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+			ret, ice_aq_str(hw->adminq.sq_last_status));
+
+	kfree(ctxt);
+	return ret;
+}
+
/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
@@ -647,6 +693,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
	if (!ctxt)
		return -ENOMEM;

+	ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
	ctxt->info = vsi->info;
	ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
@@ -689,9 +736,6 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
 * used if DVM is supported. Also, this function should never be called directly
 * as it should be part of ice_vsi_vlan_ops if it's needed.
 *
- * This function does not support clearing the port VLAN as there is currently
- * no use case for this.
- *
 * Use the ice_vlan structure passed in to set this VSI in a port VLAN.
 */
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
@@ -705,3 +749,37 @@ int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
	return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
}
+
+/**
+ * ice_vsi_clear_outer_port_vlan - clear the outer port VLAN
+ * @vsi: VSI to configure
+ *
+ * Restore the VLAN configuration previously saved in vsi->vlan_info;
+ * it is saved there when the port VLAN is configured.
+ */
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
+{
+	struct ice_hw *hw = &vsi->back->hw;
+	struct ice_vsi_ctx *ctxt;
+	int err;
+
+	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+	if (!ctxt)
+		return -ENOMEM;
+
+	ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+	vsi->info.port_based_outer_vlan = 0;
+	ctxt->info = vsi->info;
+
+	ctxt->info.valid_sections =
+		cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+			    ICE_AQ_VSI_PROP_SW_VALID);
+
+	err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+	if (err)
+		dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
+			err, ice_aq_str(hw->adminq.sq_last_status));
+
+	kfree(ctxt);
+	return err;
+}
@@ -7,6 +7,12 @@
#include <linux/types.h>
#include "ice_vlan.h"

+struct ice_vsi_vlan_info {
+	u8 sw_flags2;
+	u8 inner_vlan_flags;
+	u8 outer_vlan_flags;
+};
+
struct ice_vsi;

int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
@@ -17,6 +23,7 @@ int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi);

int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
@@ -28,5 +35,6 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);

#endif /* _ICE_VSI_VLAN_LIB_H_ */
@@ -21,6 +21,7 @@ struct ice_vsi_vlan_ops {
	int (*ena_tx_filtering)(struct ice_vsi *vsi);
	int (*dis_tx_filtering)(struct ice_vsi *vsi);
	int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+	int (*clear_port_vlan)(struct ice_vsi *vsi);
};

void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
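To close, a hedged sketch of dispatching through the new op; the helper used to fetch the ops table and the fallback error value are assumptions for illustration, only the clear_port_vlan member comes from this diff.

/* Illustrative dispatch: fetch the active VLAN ops table for the VSI
 * and invoke clear_port_vlan if an implementation was installed.
 */
struct ice_vsi_vlan_ops *ops = ice_get_compat_vsi_vlan_ops(vsi); /* assumed helper */
int err = ops->clear_port_vlan ? ops->clear_port_vlan(vsi) : -EOPNOTSUPP;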
...