Commit c7b9e633 authored by David S. Miller

Merge branch 'mlx5-sriov-vlan-push-pop'

Saeed Mahameed says:

====================
Mellanox 100G SRIOV offloads vlan push/pop

From Or Gerlitz:

This series further enhances the mlx5 SRIOV TC offloads to handle the
TC vlan push and pop actions. This serves a common use-case in
virtualization systems, where the virtual switch adds (pushes) vlan tags
to packets sent from VMs and removes (pops) vlan tags before a packet
is received by the VM. We use the new E-Switch switchdev mode and the
TC vlan action to achieve the same in SW-defined SRIOV environments, by
offloading TC rules that combine the vlan action with forwarding
(the TC mirred/redirect action). An illustrative rule pair is shown below.
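
For illustration only (the representor netdev names here are hypothetical),
the kind of rule pair this series aims to offload looks like:

    # push a vlan tag on VF --> wire traffic, set on the VF representor
    tc filter add dev vf0_rep parent ffff: protocol ip flower skip_sw \
        action vlan push id 100 \
        action mirred egress redirect dev uplink_rep

    # pop the vlan tag on wire --> VF traffic, set on the uplink representor
    tc filter add dev uplink_rep parent ffff: protocol 802.1Q flower skip_sw \
        vlan_id 100 \
        action vlan pop \
        action mirred egress redirect dev vf0_rep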

The first patch adds helpers that let offloading drivers access the TC
vlan action info; a short sketch of how a driver consumes them follows
this paragraph. The next five patches don't add any new functionality;
they refactor and clean up the existing code for use by the later patches.
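
A condensed sketch of the driver-side pattern (essentially what patch
eight does in mlx5; "attr" stands in for the driver-private
mlx5_esw_flow_attr, and the surrounding function is elided):

	/* walk the filter's actions and decode a TC vlan action via the
	 * new helpers: is_tcf_vlan(), tcf_vlan_action(),
	 * tcf_vlan_push_vid() and tcf_vlan_push_proto()
	 */
	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_vlan(a))
			continue;
		if (tcf_vlan_action(a) == VLAN_F_POP) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
			/* only plain 802.1Q push is offloaded */
			if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
			attr->vlan = tcf_vlan_push_vid(a);
		}
	}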

The seventh patch adds vlan support to the mlx5 e-switch in switchdev
mode. The eighth patch offloads the vlan action from TC, and the last patch
adds matching on vlans, which is typically required by TC flows that involve
a vlan pop action.

The series was applied on top of commit 524605e5 "cxgb4: Convert to use simple_open()"
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fefa569a 095b6cfd
@@ -869,6 +869,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
...
@@ -446,6 +446,16 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
 	kfree(rq->mpwqe.info);
 }
 
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+	if (rep && rep->vport != FDB_UPLINK_VPORT)
+		return true;
+
+	return false;
+}
+
 static int mlx5e_create_rq(struct mlx5e_channel *c,
 			   struct mlx5e_rq_param *param,
 			   struct mlx5e_rq *rq)
@@ -487,6 +497,11 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 	switch (priv->params.rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		if (mlx5e_is_vf_vport_rep(priv)) {
+			err = -EINVAL;
+			goto err_rq_wq_destroy;
+		}
+
 		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
 		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@ -512,7 +527,11 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 			goto err_rq_wq_destroy;
 		}
 
-		rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+		if (mlx5e_is_vf_vport_rep(priv))
+			rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
+		else
+			rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
 		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
@@ -3726,9 +3745,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
 		rep.load = mlx5e_nic_rep_load;
 		rep.unload = mlx5e_nic_rep_unload;
-		rep.vport = 0;
+		rep.vport = FDB_UPLINK_VPORT;
 		rep.priv_data = priv;
-		mlx5_eswitch_register_vport_rep(esw, &rep);
+		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
 	}
 }
@@ -3867,7 +3886,7 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
 		rep.unload = mlx5e_vport_rep_unload;
 		rep.vport = vport;
 		ether_addr_copy(rep.hw_id, mac);
-		mlx5_eswitch_register_vport_rep(esw, &rep);
+		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
 	}
 }
...
@@ -36,6 +36,7 @@
 #include <net/busy_poll.h>
 #include "en.h"
 #include "en_tc.h"
+#include "eswitch.h"
 
 static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
 {
@@ -629,7 +630,6 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
 	rq->stats.packets++;
 	rq->stats.bytes += cqe_bcnt;
 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
-	napi_gro_receive(rq->cq.napi, skb);
 }
 
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
@@ -733,20 +733,15 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 	}
 }
 
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+			     u16 wqe_counter, u32 cqe_bcnt)
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
 	struct mlx5e_dma_info *di;
-	struct mlx5e_rx_wqe *wqe;
-	__be16 wqe_counter_be;
 	struct sk_buff *skb;
-	u16 wqe_counter;
 	void *va, *data;
-	u32 cqe_bcnt;
-
-	wqe_counter_be = cqe->wqe_counter;
-	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
 
 	di             = &rq->dma_info[wqe_counter];
 	va             = page_address(di->page);
 	data           = va + MLX5_RX_HEADROOM;
@@ -757,22 +752,21 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 				      rq->buff.wqe_sz,
 				      DMA_FROM_DEVICE);
 	prefetch(data);
-	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
 		rq->stats.wqe_err++;
 		mlx5e_page_release(rq, di, true);
-		goto wq_ll_pop;
+		return NULL;
 	}
 
 	if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
-		goto wq_ll_pop; /* page/packet was consumed by XDP */
+		return NULL; /* page/packet was consumed by XDP */
 
 	skb = build_skb(va, RQ_PAGE_SIZE(rq));
 	if (unlikely(!skb)) {
 		rq->stats.buff_alloc_err++;
 		mlx5e_page_release(rq, di, true);
-		goto wq_ll_pop;
+		return NULL;
 	}
 
 	/* queue up for recycling ..*/
@@ -782,8 +776,61 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	skb_reserve(skb, MLX5_RX_HEADROOM);
 	skb_put(skb, cqe_bcnt);
 
+	return skb;
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	struct mlx5e_rx_wqe *wqe;
+	__be16 wqe_counter_be;
+	struct sk_buff *skb;
+	u16 wqe_counter;
+	u32 cqe_bcnt;
+
+	wqe_counter_be = cqe->wqe_counter;
+	wqe_counter    = be16_to_cpu(wqe_counter_be);
+	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
+
+	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+	if (!skb)
+		goto wq_ll_pop;
+
+	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+	napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+		       &wqe->next.next_wqe_index);
+}
+
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	struct net_device *netdev = rq->netdev;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5e_rx_wqe *wqe;
+	struct sk_buff *skb;
+	__be16 wqe_counter_be;
+	u16 wqe_counter;
+	u32 cqe_bcnt;
+
+	wqe_counter_be = cqe->wqe_counter;
+	wqe_counter    = be16_to_cpu(wqe_counter_be);
+	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
+
+	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+	if (!skb)
+		goto wq_ll_pop;
+
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+	if (rep->vlan && skb_vlan_tag_present(skb))
+		skb_vlan_pop(skb);
+
+	napi_gro_receive(rq->cq.napi, skb);
+
 wq_ll_pop:
 	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
 		       &wqe->next.next_wqe_index);
@@ -861,6 +908,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+	napi_gro_receive(rq->cq.napi, skb);
 
 mpwrq_cqe_out:
 	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
...
@@ -39,6 +39,7 @@
 #include <linux/rhashtable.h>
 #include <net/switchdev.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -47,6 +48,7 @@ struct mlx5e_tc_flow {
 	struct rhash_head	node;
 	u64			cookie;
 	struct mlx5_flow_rule	*rule;
+	struct mlx5_esw_flow_attr *attr;
 };
 
 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
@@ -114,27 +116,30 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
 static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 						    struct mlx5_flow_spec *spec,
-						    u32 action, u32 dst_vport)
+						    struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	u32 src_vport;
+	int err;
 
-	if (rep->vport) /* set source vport for the flow */
-		src_vport = rep->vport;
-	else
-		src_vport = FDB_UPLINK_VPORT;
+	err = mlx5_eswitch_add_vlan_action(esw, attr);
+	if (err)
+		return ERR_PTR(err);
 
-	return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 }
 
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
-			      struct mlx5_flow_rule *rule)
+			      struct mlx5_flow_rule *rule,
+			      struct mlx5_esw_flow_attr *attr)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_fc *counter = NULL;
 
 	counter = mlx5_flow_rule_counter(rule);
 
+	if (esw && esw->mode == SRIOV_OFFLOADS)
+		mlx5_eswitch_del_vlan_action(esw, attr);
+
 	mlx5_del_flow_rule(rule);
 
 	mlx5_fc_destroy(priv->mdev, counter);
@@ -159,6 +164,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 	      ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 		BIT(FLOW_DISSECTOR_KEY_BASIC) |
 		BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+		BIT(FLOW_DISSECTOR_KEY_VLAN) |
 		BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
 		BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
 		BIT(FLOW_DISSECTOR_KEY_PORTS))) {
@@ -222,6 +228,24 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 				key->src);
 	}
 
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_dissector_key_vlan *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->key);
+		struct flow_dissector_key_vlan *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->mask);
+		if (mask->vlan_id) {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+		}
+	}
+
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
 		struct flow_dissector_key_ipv4_addrs *key =
 			skb_flow_dissector_target(f->dissector,
@@ -361,7 +385,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 }
 
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
-				u32 *action, u32 *dest_vport)
+				struct mlx5_esw_flow_attr *attr)
 {
 	const struct tc_action *a;
 	LIST_HEAD(actions);
@@ -369,16 +393,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	if (tc_no_actions(exts))
 		return -EINVAL;
 
-	*action = 0;
+	memset(attr, 0, sizeof(*attr));
+	attr->in_rep = priv->ppriv;
 
 	tcf_exts_to_list(exts, &actions);
 	list_for_each_entry(a, &actions, list) {
-		/* Only support a single action per rule */
-		if (*action)
-			return -EINVAL;
-
 		if (is_tcf_gact_shot(a)) {
-			*action = MLX5_FLOW_CONTEXT_ACTION_DROP |
-				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+					MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			continue;
 		}
@@ -387,7 +408,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			int ifindex = tcf_mirred_ifindex(a);
 			struct net_device *out_dev;
 			struct mlx5e_priv *out_priv;
-			struct mlx5_eswitch_rep *out_rep;
 
 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -397,13 +417,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				return -EINVAL;
 			}
 
+			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 			out_priv = netdev_priv(out_dev);
-			out_rep  = out_priv->ppriv;
+			attr->out_rep = out_priv->ppriv;
+			continue;
+		}
 
-			if (out_rep->vport == 0)
-				*dest_vport = FDB_UPLINK_VPORT;
-			else
-				*dest_vport = out_rep->vport;
-			*action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+		if (is_tcf_vlan(a)) {
+			if (tcf_vlan_action(a) == VLAN_F_POP) {
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+					return -EOPNOTSUPP;
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+				attr->vlan = tcf_vlan_push_vid(a);
+			}
 			continue;
 		}
@@ -417,18 +446,29 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
 	int err = 0;
-	u32 flow_tag, action, dest_vport = 0;
+	bool fdb_flow = false;
+	u32 flow_tag, action;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_rule *old = NULL;
+	struct mlx5_esw_flow_attr *old_attr;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 
+	if (esw && esw->mode == SRIOV_OFFLOADS)
+		fdb_flow = true;
+
 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
 				      tc->ht_params);
-	if (flow)
+	if (flow) {
 		old = flow->rule;
-	else
-		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+		old_attr = flow->attr;
+	} else {
+		if (fdb_flow)
+			flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
+				       GFP_KERNEL);
+		else
+			flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+	}
 
 	spec = mlx5_vzalloc(sizeof(*spec));
 	if (!spec || !flow) {
@@ -442,11 +482,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	if (err < 0)
 		goto err_free;
 
-	if (esw && esw->mode == SRIOV_OFFLOADS) {
-		err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+	if (fdb_flow) {
+		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
+		err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
 		if (err < 0)
 			goto err_free;
-		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
 	} else {
 		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
 		if (err < 0)
@@ -465,7 +506,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 		goto err_del_rule;
 
 	if (old)
-		mlx5e_tc_del_flow(priv, old);
+		mlx5e_tc_del_flow(priv, old, old_attr);
 
 	goto out;
@@ -493,7 +534,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
 	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
 
-	mlx5e_tc_del_flow(priv, flow->rule);
+	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
 
 	kfree(flow);
@@ -550,7 +591,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
 	struct mlx5e_tc_flow *flow = ptr;
 	struct mlx5e_priv *priv = arg;
 
-	mlx5e_tc_del_flow(priv, flow->rule);
+	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
 
 	kfree(flow);
 }
...
@@ -127,7 +127,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
 }
 
 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
-				  u16 vlan, u8 qos, bool set)
+				  u16 vlan, u8 qos, u8 set_flags)
 {
 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
@@ -135,14 +135,18 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
 		return -ENOTSUPP;
 
-	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
-		  vport, vlan, qos, set);
+	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
+		  vport, vlan, qos, set_flags);
 
-	if (set) {
+	if (set_flags & SET_VLAN_STRIP)
 		MLX5_SET(modify_esw_vport_context_in, in,
 			 esw_vport_context.vport_cvlan_strip, 1);
+
+	if (set_flags & SET_VLAN_INSERT) {
 		/* insert only if no vlan in packet */
 		MLX5_SET(modify_esw_vport_context_in, in,
 			 esw_vport_context.vport_cvlan_insert, 1);
+
 		MLX5_SET(modify_esw_vport_context_in, in,
 			 esw_vport_context.cvlan_pcp, qos);
 		MLX5_SET(modify_esw_vport_context_in, in,
@@ -1778,25 +1782,21 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 	return 0;
 }
 
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				int vport, u16 vlan, u8 qos)
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				  int vport, u16 vlan, u8 qos, u8 set_flags)
 {
 	struct mlx5_vport *evport;
 	int err = 0;
-	int set = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
 		return -EINVAL;
 
-	if (vlan || qos)
-		set = 1;
-
 	mutex_lock(&esw->state_lock);
 	evport = &esw->vports[vport];
 
-	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
 	if (err)
 		goto unlock;
@@ -1814,6 +1814,17 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 	return err;
 }
 
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				int vport, u16 vlan, u8 qos)
+{
+	u8 set_flags = 0;
+
+	if (vlan || qos)
+		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+}
+
 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
 				    int vport, bool spoofchk)
 {
...
@@ -157,6 +157,7 @@ struct mlx5_eswitch_fdb {
 			struct mlx5_flow_group *send_to_vport_grp;
 			struct mlx5_flow_group *miss_grp;
 			struct mlx5_flow_rule  *miss_rule;
+			int vlan_push_pop_refcount;
 		} offloads;
 	};
 };
@@ -178,11 +179,14 @@ struct mlx5_eswitch_rep {
 	void		       (*unload)(struct mlx5_eswitch *esw,
 					 struct mlx5_eswitch_rep *rep);
 	u16		       vport;
-	struct mlx5_flow_rule *vport_rx_rule;
+	u8		       hw_id[ETH_ALEN];
 	void		      *priv_data;
+
+	struct mlx5_flow_rule *vport_rx_rule;
 	struct list_head       vport_sqs_list;
+	u16		       vlan;
+	u32		       vlan_refcount;
 	bool		       valid;
-	u8		       hw_id[ETH_ALEN];
 };
 
 struct mlx5_esw_offload {
@@ -237,14 +241,32 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 struct ifla_vf_stats *vf_stats);
 
 struct mlx5_flow_spec;
+struct mlx5_esw_flow_attr;
 
 struct mlx5_flow_rule *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
-				u32 action, u32 src_vport, u32 dst_vport);
+				struct mlx5_esw_flow_attr *attr);
 struct mlx5_flow_rule *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
 
+enum {
+	SET_VLAN_STRIP	= BIT(0),
+	SET_VLAN_INSERT	= BIT(1)
+};
+
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+
+struct mlx5_esw_flow_attr {
+	struct mlx5_eswitch_rep *in_rep;
+	struct mlx5_eswitch_rep *out_rep;
+
+	int	action;
+	u16	vlan;
+	bool	vlan_handled;
+};
+
 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 				 struct mlx5_eswitch_rep *rep,
 				 u16 *sqns_array, int sqns_num);
@@ -254,9 +276,17 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+				     int vport_index,
 				     struct mlx5_eswitch_rep *rep);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport);
+				       int vport_index);
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr);
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr);
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				  int vport, u16 vlan, u8 qos, u8 set_flags);
 
 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
...
@@ -46,19 +46,22 @@ enum {
 struct mlx5_flow_rule *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
-				u32 action, u32 src_vport, u32 dst_vport)
+				struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_flow_destination dest = { 0 };
 	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_rule *rule;
 	void *misc;
+	int action;
 
 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
 
+	action = attr->action;
+
 	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
 		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-		dest.vport_num = dst_vport;
+		dest.vport_num = attr->out_rep->vport;
 		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(esw->dev, true);
@@ -69,7 +72,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	}
 
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-	MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
 
 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -86,6 +89,186 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 }
 
+static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vf_vport, err = 0;
+
+	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+		rep = &esw->offloads.vport_reps[vf_vport];
+		if (!rep->valid)
+			continue;
+
+		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+		if (err)
+			goto out;
+	}
+
+out:
+	return err;
+}
+
+static struct mlx5_eswitch_rep *
+esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
+{
+	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
+
+	in_rep  = attr->in_rep;
+	out_rep = attr->out_rep;
+
+	if (push)
+		vport = in_rep;
+	else if (pop)
+		vport = out_rep;
+	else
+		vport = in_rep;
+
+	return vport;
+}
+
+static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
+				     bool push, bool pop, bool fwd)
+{
+	struct mlx5_eswitch_rep *in_rep, *out_rep;
+
+	if ((push || pop) && !fwd)
+		goto out_notsupp;
+
+	in_rep  = attr->in_rep;
+	out_rep = attr->out_rep;
+
+	if (push && in_rep->vport == FDB_UPLINK_VPORT)
+		goto out_notsupp;
+
+	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+		goto out_notsupp;
+
+	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
+	if (!push && !pop && fwd)
+		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+			goto out_notsupp;
+
+	/* protects against (1) setting rules with different vlans to push and
+	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
+	 */
+	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+		goto out_notsupp;
+
+	return 0;
+
+out_notsupp:
+	return -ENOTSUPP;
+}
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr)
+{
+	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+	struct mlx5_eswitch_rep *vport = NULL;
+	bool push, pop, fwd;
+	int err = 0;
+
+	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+	err = esw_add_vlan_action_check(attr, push, pop, fwd);
+	if (err)
+		return err;
+
+	attr->vlan_handled = false;
+
+	vport = esw_vlan_action_get_vport(attr, push, pop);
+
+	if (!push && !pop && fwd) {
+		/* tracks VF --> wire rules without vlan push action */
+		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+			vport->vlan_refcount++;
+			attr->vlan_handled = true;
+		}
+
+		return 0;
+	}
+
+	if (!push && !pop)
+		return 0;
+
+	if (!(offloads->vlan_push_pop_refcount)) {
+		/* it's the 1st vlan rule, apply global vlan pop policy */
+		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
+		if (err)
+			goto out;
+	}
+	offloads->vlan_push_pop_refcount++;
+
+	if (push) {
+		if (vport->vlan_refcount)
+			goto skip_set_push;
+
+		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+						    SET_VLAN_INSERT | SET_VLAN_STRIP);
+		if (err)
+			goto out;
+		vport->vlan = attr->vlan;
+skip_set_push:
+		vport->vlan_refcount++;
+	}
+out:
+	if (!err)
+		attr->vlan_handled = true;
+	return err;
+}
+
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr)
+{
+	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+	struct mlx5_eswitch_rep *vport = NULL;
+	bool push, pop, fwd;
+	int err = 0;
+
+	if (!attr->vlan_handled)
+		return 0;
+
+	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+	vport = esw_vlan_action_get_vport(attr, push, pop);
+
+	if (!push && !pop && fwd) {
+		/* tracks VF --> wire rules without vlan push action */
+		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+			vport->vlan_refcount--;
+
+		return 0;
+	}
+
+	if (push) {
+		vport->vlan_refcount--;
+		if (vport->vlan_refcount)
+			goto skip_unset_push;
+
+		vport->vlan = 0;
+		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
+						    0, 0, SET_VLAN_STRIP);
+		if (err)
+			goto out;
+	}
+
+skip_unset_push:
+	offloads->vlan_push_pop_refcount--;
+	if (offloads->vlan_push_pop_refcount)
+		return 0;
+
+	/* no more vlan rules, stop global vlan pop policy */
+	err = esw_set_global_vlan_pop(esw, 0);
+
+out:
+	return err;
+}
+
 static struct mlx5_flow_rule *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
@@ -144,16 +327,12 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 {
 	struct mlx5_flow_rule *flow_rule;
 	struct mlx5_esw_sq *esw_sq;
-	int vport;
 	int err;
 	int i;
 
 	if (esw->mode != SRIOV_OFFLOADS)
 		return 0;
 
-	vport = rep->vport == 0 ?
-		FDB_UPLINK_VPORT : rep->vport;
-
 	for (i = 0; i < sqns_num; i++) {
 		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
 		if (!esw_sq) {
@@ -163,7 +342,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
 		/* Add re-inject rule to the PF/representor sqs */
 		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-								vport,
+								rep->vport,
 								sqns_array[i]);
 		if (IS_ERR(flow_rule)) {
 			err = PTR_ERR(flow_rule);
@@ -620,27 +799,36 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 }
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
-				     struct mlx5_eswitch_rep *rep)
+				     int vport_index,
+				     struct mlx5_eswitch_rep *__rep)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
+	struct mlx5_eswitch_rep *rep;
+
+	rep = &offloads->vport_reps[vport_index];
+
+	memset(rep, 0, sizeof(*rep));
 
-	memcpy(&offloads->vport_reps[rep->vport], rep,
-	       sizeof(struct mlx5_eswitch_rep));
+	rep->load      = __rep->load;
+	rep->unload    = __rep->unload;
+	rep->vport     = __rep->vport;
+	rep->priv_data = __rep->priv_data;
+	ether_addr_copy(rep->hw_id, __rep->hw_id);
 
-	INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
-	offloads->vport_reps[rep->vport].valid = true;
+	INIT_LIST_HEAD(&rep->vport_sqs_list);
+	rep->valid = true;
 }
 
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport)
+				       int vport_index)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;
 
-	rep = &offloads->vport_reps[vport];
+	rep = &offloads->vport_reps[vport_index];
 
-	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
 		rep->unload(esw, rep);
 
-	offloads->vport_reps[vport].valid = false;
+	rep->valid = false;
 }
@@ -11,6 +11,7 @@
 #define __NET_TC_VLAN_H
 
 #include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
 
 #define VLAN_F_POP	0x1
 #define VLAN_F_PUSH	0x2
@@ -24,4 +25,28 @@ struct tcf_vlan {
 };
 #define to_vlan(a) ((struct tcf_vlan *)a)
 
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	if (a->ops && a->ops->type == TCA_ACT_VLAN)
+		return true;
+#endif
+	return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+	return to_vlan(a)->tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+	return to_vlan(a)->tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+	return to_vlan(a)->tcfv_push_proto;
+}
+
 #endif /* __NET_TC_VLAN_H */