Commit 95bf387e authored by David S. Miller

Merge tag 'mlx5-updates-2021-10-04' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-10-04

Misc updates for mlx5 driver

1) Add TX max rate support for MQPRIO channel mode (example tc command below)
2) Trivial TC action and modify header refactoring
3) TC support for accept action in fdb offloads
4) Allow single IRQ for PCI functions

5) Bridge offload: Pop PVID VLAN header on egress miss

Vlad Buslov says:
=================

With the current architecture of mlx5 bridge offload it is possible for a
packet to match in the ingress table by source MAC (resulting in a VLAN
header push when the port has a PVID configured) and then miss in the egress
table when the destination MAC is not in the FDB. Due to the lack of hardware
learning in NICs, this in turn sends the packet to the software data path
with the PVID VLAN already added by hardware. This doesn't break the software
bridge, since it accepts either untagged packets or packets with any
provisioned VLAN on ports with a PVID, but it can break ingress TC if the
affected part of the Ethernet header is matched by a classifier.

Improve compatibility with software TC by restoring the packet header on
egress miss. Effectively, this change makes the mlx5 bridge offload atomic:
a packet is either modified and redirected to the destination port, or
appears unmodified in software.

=================
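
As an illustration of item 1 above (device name, queue layout and rates are
placeholder values, not taken from this series), per-TC TX max rate in MQPRIO
channel mode is typically configured with iproute2 roughly as:

    tc qdisc replace dev eth0 root handle 1: mqprio \
        num_tc 2 map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
        hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit

Rates below 1 Mbit/sec are rejected by mlx5e_qos_bytes_rate_check() in the
diff below, and min_rate remains unsupported by the driver.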

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 45c9d966 f891b7cd
@@ -1559,6 +1559,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_IB_NUM_PF_EQE,
 	};
 	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
...
@@ -251,6 +251,9 @@ struct mlx5e_params {
 		u16 mode;
 		u8 num_tc;
 		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+		struct {
+			struct mlx5e_mqprio_rl *rl;
+		} channel;
 	} mqprio;
 	bool rx_cqe_compress_def;
 	bool tunneled_offload_en;
@@ -877,6 +880,7 @@ struct mlx5e_priv {
 #endif
 	struct mlx5e_scratchpad scratchpad;
 	struct mlx5e_htb htb;
+	struct mlx5e_mqprio_rl *mqprio_rl;
 };

 struct mlx5e_rx_handlers {
@@ -1002,7 +1006,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
 		    struct mlx5e_modify_sq_param *p);
 int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
 		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
-		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+		     struct mlx5e_sq_stats *sq_stats);
 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
 void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
...
@@ -7,6 +7,21 @@

 #define BYTES_IN_MBIT 125000

+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+	if (nbytes < BYTES_IN_MBIT) {
+		qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
+			 nbytes, BYTES_IN_MBIT);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+	return div_u64(nbytes, BYTES_IN_MBIT);
+}
+
 int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
 {
 	return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
@@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
 	if (err)
 		goto err_free_sq;
 	err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
-			       &param_sq, sq, 0, node->hw_id, node->qid);
+			       &param_sq, sq, 0, node->hw_id,
+			       priv->htb.qos_sq_stats[node->qid]);
 	if (err)
 		goto err_close_cq;
@@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce
 	return err;
 }
+
+struct mlx5e_mqprio_rl {
+	struct mlx5_core_dev *mdev;
+	u32 root_id;
+	u32 *leaves_id;
+	u8 num_tc;
+};
+
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
+{
+	return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
+}
+
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
+{
+	kvfree(rl);
+}
+
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+			 u64 max_rate[])
+{
+	int err;
+	int tc;
+
+	if (!mlx5_qos_is_supported(mdev)) {
+		qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+		return -EOPNOTSUPP;
+	}
+	if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
+		return -EINVAL;
+
+	rl->mdev = mdev;
+	rl->num_tc = num_tc;
+	rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
+	if (!rl->leaves_id)
+		return -ENOMEM;
+
+	err = mlx5_qos_create_root_node(mdev, &rl->root_id);
+	if (err)
+		goto err_free_leaves;
+
+	qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
+
+	for (tc = 0; tc < num_tc; tc++) {
+		u32 max_average_bw;
+
+		max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
+		err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
+						&rl->leaves_id[tc]);
+		if (err)
+			goto err_destroy_leaves;
+
+		qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
+			tc, rl->leaves_id[tc], max_average_bw);
+	}
+	return 0;
+
+err_destroy_leaves:
+	while (--tc >= 0)
+		mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
+	mlx5_qos_destroy_node(mdev, rl->root_id);
+err_free_leaves:
+	kvfree(rl->leaves_id);
+	return err;
+}
+
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
+{
+	int tc;
+
+	for (tc = 0; tc < rl->num_tc; tc++)
+		mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
+	mlx5_qos_destroy_node(rl->mdev, rl->root_id);
+	kvfree(rl->leaves_id);
+}
+
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
+{
+	if (tc >= rl->num_tc)
+		return -EINVAL;
+
+	*hw_id = rl->leaves_id[tc];
+	return 0;
+}
@@ -12,6 +12,7 @@ struct mlx5e_priv;
 struct mlx5e_channels;
 struct mlx5e_channel;

+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
 int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
 int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
 int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
 			  struct netlink_ext_ack *extack);

+/* MQPRIO TX rate limit */
+struct mlx5e_mqprio_rl;
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void);
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+			 u64 max_rate[]);
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
 #endif
@@ -602,7 +602,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	}
 	sample_flow->pre_attr = pre_attr;

-	return sample_flow->post_rule;
+	return sample_flow->pre_rule;

 err_pre_offload_rule:
 	kfree(pre_attr);
@@ -613,7 +613,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 err_obj_id:
 	sampler_put(tc_psample, sample_flow->sampler);
 err_sampler:
-	if (!post_act_handle)
+	if (sample_flow->post_rule)
 		del_post_rule(esw, sample_flow, attr);
 err_post_rule:
 	if (post_act_handle)
@@ -628,9 +628,7 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
 			  struct mlx5_flow_handle *rule,
 			  struct mlx5_flow_attr *attr)
 {
-	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
 	struct mlx5e_sample_flow *sample_flow;
-	struct mlx5_vport_tbl_attr tbl_attr;
 	struct mlx5_eswitch *esw;

 	if (IS_ERR_OR_NULL(tc_psample))
@@ -650,23 +648,14 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
 	 */
 	sample_flow = attr->sample_attr->sample_flow;
 	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
-	if (!sample_flow->post_act_handle)
-		mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule,
-						sample_flow->post_attr);

 	sample_restore_put(tc_psample, sample_flow->restore);
 	mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
 	sampler_put(tc_psample, sample_flow->sampler);
-	if (sample_flow->post_act_handle) {
+	if (sample_flow->post_act_handle)
 		mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
-	} else {
-		tbl_attr.chain = attr->chain;
-		tbl_attr.prio = attr->prio;
-		tbl_attr.vport = esw_attr->in_rep->vport;
-		tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
-		mlx5_esw_vporttbl_put(esw, &tbl_attr);
-		kfree(sample_flow->post_attr);
-	}
+	else
+		del_post_rule(esw, sample_flow, attr);

 	kfree(sample_flow->pre_attr);
 	kfree(sample_flow);
...
@@ -118,6 +118,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,

 		uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
 		attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
+	} else {
+		struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+
+		if (tunnel && tunnel->get_remote_ifindex)
+			attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
 	}

 	rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
@@ -435,12 +440,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
 					struct net_device *mirred_dev,
 					struct mlx5e_tc_tun_route_attr *attr)
 {
+	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
 	struct net_device *route_dev;
 	struct net_device *out_dev;
 	struct dst_entry *dst;
 	struct neighbour *n;
 	int ret;

+	if (tunnel && tunnel->get_remote_ifindex)
+		attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
 	dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
 					      NULL);
 	if (IS_ERR(dst))
...
@@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel {
 			    void *headers_v);
 	bool (*encap_info_equal)(struct mlx5e_encap_key *a,
 				 struct mlx5e_encap_key *b);
+	int (*get_remote_ifindex)(struct net_device *mirred_dev);
 };

 extern struct mlx5e_tc_tunnel vxlan_tunnel;
...
@@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
 	return 0;
 }

+static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
+{
+	const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
+	const struct vxlan_rdst *dst = &vxlan->default_dst;
+
+	return dst->remote_ifindex;
+}
+
 struct mlx5e_tc_tunnel vxlan_tunnel = {
 	.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
 	.match_level = MLX5_MATCH_L4,
@@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
 	.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
 	.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
 	.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+	.get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
 };
@@ -1300,7 +1300,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,

 int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
 		     struct mlx5e_params *params, struct mlx5e_sq_param *param,
-		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+		     struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+		     struct mlx5e_sq_stats *sq_stats)
 {
 	struct mlx5e_create_sq_param csp = {};
 	u32 tx_rate;
@@ -1310,10 +1311,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
 	if (err)
 		return err;

-	if (qos_queue_group_id)
-		sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
-	else
-		sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+	sq->stats = sq_stats;

 	csp.tisn = tisn;
 	csp.tis_lst_sz = 1;
@@ -1707,6 +1705,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
 		mlx5e_close_cq(&c->sq[tc].cq);
 }

+static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+{
+	int tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+		if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+			return tc;
+
+	WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+	return -ENOENT;
+}
+
+static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+					u32 *hw_id)
+{
+	int tc;
+
+	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+	    !params->mqprio.channel.rl) {
+		*hw_id = 0;
+		return 0;
+	}
+
+	tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+	if (tc < 0)
+		return tc;
+
+	return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+}
+
 static int mlx5e_open_sqs(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
 			  struct mlx5e_channel_param *cparam)
@@ -1715,9 +1743,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,

 	for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
 		int txq_ix = c->ix + tc * params->num_channels;
+		u32 qos_queue_group_id;
+
+		err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+		if (err)
+			goto err_close_sqs;

 		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
-				       params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+				       params, &cparam->txq_sq, &c->sq[tc], tc,
+				       qos_queue_group_id,
+				       &c->priv->channel_stats[c->ix].sq[tc]);
 		if (err)
 			goto err_close_sqs;
 	}
@@ -2342,6 +2377,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
 		goto err_txqs;
 	}
+	if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+		if (priv->mqprio_rl) {
+			mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+			mlx5e_mqprio_rl_free(priv->mqprio_rl);
+		}
+		priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+	}

 	return 0;
@@ -2903,15 +2945,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
 {
 	params->mqprio.mode = TC_MQPRIO_MODE_DCB;
 	params->mqprio.num_tc = num_tc;
+	params->mqprio.channel.rl = NULL;
 	mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
 					     params->num_channels);
 }

 static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
-					    struct tc_mqprio_qopt *qopt)
+					    struct tc_mqprio_qopt *qopt,
+					    struct mlx5e_mqprio_rl *rl)
 {
 	params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
 	params->mqprio.num_tc = qopt->num_tc;
+	params->mqprio.channel.rl = rl;
 	mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
 }
@@ -2971,9 +3016,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
 			netdev_err(netdev, "Min tx rate is not supported\n");
 			return -EINVAL;
 		}
 		if (mqprio->max_rate[i]) {
-			netdev_err(netdev, "Max tx rate is not supported\n");
-			return -EINVAL;
+			int err;
+
+			err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+			if (err)
+				return err;
 		}

 		if (mqprio->qopt.offset[i] != agg_count) {
@@ -2992,11 +3041,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
 	return 0;
 }

+static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+{
+	int tc;
+
+	for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+		if (mqprio->max_rate[tc])
+			return true;
+	return false;
+}
+
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 					 struct tc_mqprio_qopt_offload *mqprio)
 {
 	mlx5e_fp_preactivate preactivate;
 	struct mlx5e_params new_params;
+	struct mlx5e_mqprio_rl *rl;
 	bool nch_changed;
 	int err;
@@ -3004,13 +3064,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 	if (err)
 		return err;

+	rl = NULL;
+	if (mlx5e_mqprio_rate_limit(mqprio)) {
+		rl = mlx5e_mqprio_rl_alloc();
+		if (!rl)
+			return -ENOMEM;
+		err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+					   mqprio->max_rate);
+		if (err) {
+			mlx5e_mqprio_rl_free(rl);
+			return err;
+		}
+	}
+
 	new_params = priv->channels.params;
-	mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+	mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);

 	nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
 	preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
 		mlx5e_update_netdev_queues_ctx;
-	return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+	err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+	if (err && rl) {
+		mlx5e_mqprio_rl_cleanup(rl);
+		mlx5e_mqprio_rl_free(rl);
+	}
+
+	return err;
 }

 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -4810,6 +4889,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 		kfree(priv->htb.qos_sq_stats[i]);
 	kvfree(priv->htb.qos_sq_stats);

+	if (priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+	}
+
 	memset(priv, 0, sizeof(*priv));
 }
...
@@ -3169,19 +3169,50 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
 	return true;
 }

-static bool actions_match_supported(struct mlx5e_priv *priv,
-				    struct flow_action *flow_action,
-				    struct mlx5e_tc_flow_parse_attr *parse_attr,
-				    struct mlx5e_tc_flow *flow,
-				    struct netlink_ext_ack *extack)
+static bool
+actions_match_supported_fdb(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow_parse_attr *parse_attr,
+			    struct mlx5e_tc_flow *flow,
+			    struct netlink_ext_ack *extack)
+{
+	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+	bool ct_flow, ct_clear;
+
+	ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+	ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+
+	if (esw_attr->split_count && ct_flow &&
+	    !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
+		/* All registers used by ct are cleared when using
+		 * split rules.
+		 */
+		NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
+		return false;
+	}
+
+	if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "current firmware doesn't support split rule for port mirroring");
+		netdev_warn_once(priv->netdev,
+				 "current firmware doesn't support split rule for port mirroring\n");
+		return false;
+	}
+
+	return true;
+}
+
+static bool
+actions_match_supported(struct mlx5e_priv *priv,
+			struct flow_action *flow_action,
+			struct mlx5e_tc_flow_parse_attr *parse_attr,
+			struct mlx5e_tc_flow *flow,
+			struct netlink_ext_ack *extack)
 {
-	bool ct_flow = false, ct_clear = false;
-	u32 actions;
+	u32 actions = flow->attr->action;
+	bool ct_flow, ct_clear;

-	ct_clear = flow->attr->ct_attr.ct_action &
-		TCA_CT_ACT_CLEAR;
+	ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
 	ct_flow = flow_flag_test(flow, CT) && !ct_clear;
-	actions = flow->attr->action;

 	if (!(actions &
 	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
@@ -3189,23 +3220,14 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 		return false;
 	}

-	if (mlx5e_is_eswitch_flow(flow)) {
-		if (flow->attr->esw_attr->split_count && ct_flow &&
-		    !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
-			/* All registers used by ct are cleared when using
-			 * split rules.
-			 */
-			NL_SET_ERR_MSG_MOD(extack,
-					   "Can't offload mirroring with action ct");
-			return false;
-		}
-	}
+	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+	    !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
+					   actions, ct_flow, ct_clear, extack))
+		return false;

-	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-		return modify_header_match_supported(priv, &parse_attr->spec,
-						     flow_action, actions,
-						     ct_flow, ct_clear,
-						     extack);
+	if (mlx5e_is_eswitch_flow(flow) &&
+	    !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+		return false;

 	return true;
 }
@@ -3354,10 +3376,50 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
 	return 0;
 }

-static int parse_tc_nic_actions(struct mlx5e_priv *priv,
-				struct flow_action *flow_action,
-				struct mlx5e_tc_flow *flow,
-				struct netlink_ext_ack *extack)
+static int
+actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
+				struct mlx5e_tc_flow *flow,
+				struct mlx5_flow_attr *attr,
+				struct pedit_headers_action *hdrs,
+				struct netlink_ext_ack *extack)
+{
+	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+	enum mlx5_flow_namespace_type ns_type;
+	int err;
+
+	if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
+	    !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
+		return 0;
+
+	ns_type = get_flow_name_space(flow);
+
+	err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
+				    &attr->action, extack);
+	if (err)
+		return err;
+
+	/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
+	if (parse_attr->mod_hdr_acts.num_actions > 0)
+		return 0;
+
+	attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+
+	if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
+		return 0;
+
+	if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+	      (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+		attr->esw_attr->split_count = 0;
+
+	return 0;
+}
+
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+		     struct flow_action *flow_action,
+		     struct mlx5e_tc_flow *flow,
+		     struct netlink_ext_ack *extack)
 {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 	struct mlx5_flow_attr *attr = flow->attr;
@@ -3467,21 +3529,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
 		}
 	}

-	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
-	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
-					    parse_attr, hdrs, &action, extack);
-		if (err)
-			return err;
-
-		/* in case all pedit actions are skipped, remove the MOD_HDR
-		 * flag.
-		 */
-		if (parse_attr->mod_hdr_acts.num_actions == 0) {
-			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
-		}
-	}
-
 	attr->action = action;

 	if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
@@ -3489,6 +3536,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 	}

+	err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+	if (err)
+		return err;
+
 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
 		return -EOPNOTSUPP;
@@ -3759,6 +3810,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,

 	flow_action_for_each(i, act, flow_action) {
 		switch (act->id) {
+		case FLOW_ACTION_ACCEPT:
+			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+				MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+			break;
 		case FLOW_ACTION_DROP:
 			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
 				MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -4043,26 +4099,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 			return err;
 	}

-	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
-	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
-		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
-					    parse_attr, hdrs, &action, extack);
-		if (err)
-			return err;
-
-		/* in case all pedit actions are skipped, remove the MOD_HDR
-		 * flag. we might have set split_count either by pedit or
-		 * pop/push. if there is no pop/push either, reset it too.
-		 */
-		if (parse_attr->mod_hdr_acts.num_actions == 0) {
-			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
-			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
-			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
-				esw_attr->split_count = 0;
-		}
-	}
-
 	attr->action = action;
+
+	err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+	if (err)
+		return err;
+
 	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
 		return -EOPNOTSUPP;
@@ -4080,13 +4122,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 	}

-	if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "current firmware doesn't support split rule for port mirroring");
-		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
-		return -EOPNOTSUPP;
-	}
-
 	/* Allocate sample attribute only when there is a sample action and
 	 * no errors after parsing.
 	 */
@@ -5005,9 +5040,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
 	}
 	uplink_priv->tunnel_mapping = mapping;

-	/* 0xFFF is reserved for stack devices slow path table mark */
+	/* Two last values are reserved for stack devices slow path table mark
+	 * and bridge ingress push mark.
+	 */
 	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
-					sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
+					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
 	if (IS_ERR(mapping)) {
 		err = PTR_ERR(mapping);
 		goto err_enc_opts_mapping;
...
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_NUM_CMD_EQE,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
 	};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = MLX5_NUM_ASYNC_EQE,
 	};
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
 		goto err2;

 	param = (struct mlx5_eq_param) {
+		.irq_index = MLX5_IRQ_EQ_CTRL,
 		.nent = /* TODO: sriov max_vf + */ 1,
 		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
 	};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
 	ncomp_eqs = table->num_comp_eqs;
 	nent = MLX5_COMP_EQ_SIZE;
 	for (i = 0; i < ncomp_eqs; i++) {
-		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
 		struct mlx5_eq_param param = {};
+		int vecidx = i;

 		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 		if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
 		goto err_out;
 	}

-	vecidx = MLX5_IRQ_VEC_COMP_BASE;
-	for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
-	     vecidx++) {
+	for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
 		err = irq_cpu_rmap_add(eq_table->rmap,
 				       pci_irq_vector(mdev->pdev, vecidx));
 		if (err) {
...
@@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan {
 	struct list_head fdb_list;
 	struct mlx5_pkt_reformat *pkt_reformat_push;
 	struct mlx5_pkt_reformat *pkt_reformat_pop;
+	struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
 };

 struct mlx5_esw_bridge_port {
...
@@ -447,8 +447,16 @@ enum {
 	MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
 	MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
 	MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
+	MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
 };

+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5_esw_attr_flags_skip(u32 attr_flags)
+{
+	return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
+}
+
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
 	struct mlx5_core_dev *in_mdev;
...
@@ -440,7 +440,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 	} else if (attr->dest_ft) {
 		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
 		(*i)++;
-	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+	} else if (mlx5_esw_attr_flags_skip(attr->flags)) {
 		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
 		(*i)++;
 	} else if (attr->dest_chain) {
@@ -467,7 +467,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,

 	if (attr->dest_ft) {
 		esw_cleanup_decap_indir(esw, attr);
-	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+	} else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
 		if (attr->dest_chain)
 			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
 		else if (esw_is_indir_table(esw, attr))
@@ -678,7 +678,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,

 	mlx5_del_flow_rules(rule);

-	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+	if (!mlx5_esw_attr_flags_skip(attr->flags)) {
 		/* unref the term table */
 		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
 			if (esw_attr->dests[i].termtbl)
...
@@ -219,7 +219,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
-	    attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+	    mlx5_esw_attr_flags_skip(attr->flags) ||
 	    !mlx5_eswitch_offload_is_uplink_port(esw, spec))
 		return false;
...
@@ -8,8 +8,6 @@

 #define MLX5_COMP_EQS_PER_SF 8

-#define MLX5_IRQ_EQ_CTRL (0)
-
 struct mlx5_irq;

 int mlx5_irq_table_init(struct mlx5_core_dev *dev);
...
@@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 	snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
 }

-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 {
-	if (vecidx == 0) {
+	if (!pool->xa_num_irqs.max) {
+		/* in case we only have a single irq for the device */
+		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
+		return;
+	}
+
+	if (vecidx == pool->xa_num_irqs.max) {
 		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
 		return;
 	}

-	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
-		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
+	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+}
+
+static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+	return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
 }

 static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
 	irq->irqn = pci_irq_vector(dev->pdev, i);
-	if (!pool->name[0])
-		irq_set_name(name, i);
+	if (!irq_pool_is_sf_pool(pool))
+		irq_set_name(pool, name, i);
 	else
 		irq_sf_set_name(pool, name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 	if (IS_ERR(irq) || !affinity)
 		goto unlock;
 	cpumask_copy(irq->mask, affinity);
+	if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
+	    cpumask_empty(irq->mask))
+		cpumask_set_cpu(0, irq->mask);
 	irq_set_affinity_hint(irq->irqn, irq->mask);
 unlock:
 	mutex_unlock(&pool->lock);
@@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	}
 pf_irq:
 	pool = irq_table->pf_pool;
+	vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
 	irq = irq_pool_request_vector(pool, vecidx, affinity);
 out:
 	if (IS_ERR(irq))
@@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)

 int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
 {
+	if (!table->pf_pool->xa_num_irqs.max)
+		return 1;
 	return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
 }
@@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 	if (mlx5_core_is_sf(dev))
 		return 0;

-	pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
-		 MLX5_IRQ_VEC_COMP_BASE;
+	pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
 	pf_vec = min_t(int, pf_vec, num_eqs);
-	if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
-		return -ENOMEM;

 	total_vec = pf_vec;
 	if (mlx5_sf_max_functions(dev))
 		total_vec += MLX5_IRQ_CTRL_SF_MAX +
 			MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);

-	total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
-					  total_vec, PCI_IRQ_MSIX);
+	total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
 	if (total_vec < 0)
 		return total_vec;
 	pf_vec = min(pf_vec, total_vec);
...
@@ -59,6 +59,8 @@

 #define MLX5_ADEV_NAME "mlx5_core"

+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
 enum {
 	MLX5_BOARD_ID_LEN = 64,
 };
...
@@ -4,7 +4,6 @@
 #ifndef MLX5_CORE_EQ_H
 #define MLX5_CORE_EQ_H

-#define MLX5_IRQ_VEC_COMP_BASE 1
 #define MLX5_NUM_CMD_EQE (32)
 #define MLX5_NUM_ASYNC_EQE (0x1000)
 #define MLX5_NUM_SPARE_EQE (0x80)
...
@@ -130,11 +130,20 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 #define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
 #define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
 #define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
 /* 0x7FF is a reserved mapping */
 #define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
 				       ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+					   ESW_TUN_OPTS_BITS) | \
+					  ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+		ESW_TUN_OPTS_OFFSET + 1)

 u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);