Commit 65d51f26 authored by David S. Miller

Merge tag 'mlx5-updates-2018-01-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2018-01-08

Four patches from Or that add Hairpin support to mlx5:
===========================================================
From:  Or Gerlitz <ogerlitz@mellanox.com>

We refer to the ability of NIC HW to forward a packet received on one
port to the other port (or from a port back to itself) as hairpin. The
application API is based on ingress tc/flower rules set on the NIC with
the mirred redirect action. Other actions can be applied to packets
during the redirect.
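
For example, a rule of the following shape (illustrative interface
names, not taken from this series) hairpins TCP port 80 traffic
arriving on one port of the device to the other port:

   tc qdisc add dev ens1f0 ingress
   tc filter add dev ens1f0 protocol ip parent ffff: \
      flower skip_sw ip_proto tcp dst_port 80 \
      action mirred egress redirect dev ens1f1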

Hairpin allows offloading the data path of various SW DDoS gateways,
load balancers, etc. to HW. Packets go through all the required
processing in HW (header re-write, encap/decap, vlan push/pop) and are
then forwarded; the CPU stays at practically zero usage. HW flow
counters are used by the control plane for monitoring and accounting.

Hairpin is implemented by pairing a receive queue (RQ) to a send queue
(SQ). All the flows that share the same <recv NIC, mirred NIC> pair are
redirected through the same hairpin pair. Currently, only header
re-write is supported as a packet modification action.

I'd like to thank Elijah Shakkour <elijahs@mellanox.com> for
implementing this functionality on a HW simulator, before it was
available in FW, so the driver code could be tested early.
===========================================================

From Feras, three patches with very small changes that allow IPoIB to
support RX timestamping for child interfaces, simply by hooking the
mlx5e timestamping PTP ioctl into the IPoIB child interface netdev
profile.

One patch from Gal to fix a spelling mistake.

Two patches from Eugenia add drop counters to the VF statistics
reported over netlink (iproute2) and implement them in the mlx5
eswitch.
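
Assuming an iproute2 build that prints the new attributes, the drop
counters then show up in the regular per-VF statistics output, e.g.
(illustrative device name):

   ip -s link show dev ens1f0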
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 45f89822 b8a0dbe3
@@ -659,6 +659,7 @@ struct mlx5e_tc_table {
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
......
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
struct hwtstamp_config *tstamp;
struct net_device *netdev;
struct mlx5e_priv *priv;
char *pseudo_header;
u32 qpn;
u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
return;
}
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
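/* take the timestamp config from the child netdev that owns this QPN, not from the RQ, which belongs to the parent interface */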
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
......
@@ -56,12 +56,14 @@ struct mlx5_nic_flow_attr {
u32 action;
u32 flow_tag;
u32 mod_hdr_id;
u32 hairpin_tirn;
};
enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
MLX5E_TC_FLOW_NIC = BIT(1),
MLX5E_TC_FLOW_OFFLOADED = BIT(2),
MLX5E_TC_FLOW_HAIRPIN = BIT(3),
};
struct mlx5e_tc_flow {
@@ -71,6 +73,7 @@ struct mlx5e_tc_flow {
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap ID */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
struct list_head hairpin; /* flows sharing the same hairpin */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +96,25 @@ enum {
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
struct mlx5_core_dev *func_mdev;
u32 tdn;
u32 tirn;
};
struct mlx5e_hairpin_entry {
/* a node of a hash table which keeps all the hairpin entries */
struct hlist_node hairpin_hlist;
/* flows sharing the same hairpin */
struct list_head flows;
int peer_ifindex;
struct mlx5e_hairpin *hp;
};
struct mod_hdr_key {
int num_actions;
void *actions;
@@ -222,6 +244,187 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
}
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
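/* the caller must hold RTNL, as required by __dev_get_by_index() */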
netdev = __dev_get_by_index(net, ifindex);
priv = netdev_priv(netdev);
return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
void *tirc;
int err;
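/* hairpin flows are steered to a dedicated direct TIR that points at the hairpin RQ */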
err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
if (err)
goto alloc_tdn_err;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
if (err)
goto create_tir_err;
return 0;
create_tir_err:
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
return err;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
int peer_ifindex)
{
struct mlx5_core_dev *func_mdev, *peer_mdev;
struct mlx5e_hairpin *hp;
struct mlx5_hairpin *pair;
int err;
hp = kzalloc(sizeof(*hp), GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
err = PTR_ERR(pair);
goto create_pair_err;
}
hp->pair = pair;
hp->func_mdev = func_mdev;
err = mlx5e_hairpin_create_transport(hp);
if (err)
goto create_transport_err;
return hp;
create_transport_err:
mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
kfree(hp);
return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
mlx5e_hairpin_destroy_transport(hp);
mlx5_core_hairpin_destroy(hp->pair);
kvfree(hp);
}
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
int peer_ifindex)
{
struct mlx5e_hairpin_entry *hpe;
hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
hairpin_hlist, peer_ifindex) {
if (hpe->peer_ifindex == peer_ifindex)
return hpe;
}
return NULL;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
int peer_ifindex = parse_attr->mirred_ifindex;
struct mlx5_hairpin_params params;
struct mlx5e_hairpin_entry *hpe;
struct mlx5e_hairpin *hp;
int err;
if (!MLX5_CAP_GEN(priv->mdev, hairpin)) {
netdev_warn(priv->netdev, "hairpin is not supported\n");
return -EOPNOTSUPP;
}
hpe = mlx5e_hairpin_get(priv, peer_ifindex);
if (hpe)
goto attach_flow;
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe)
return -ENOMEM;
INIT_LIST_HEAD(&hpe->flows);
hpe->peer_ifindex = peer_ifindex;
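/* ask for 2^15 (32KB) of hairpin buffer space, clamped to the min/max the device reports */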
params.log_data_size = 15;
params.log_data_size = min_t(u8, params.log_data_size,
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.log_data_size = max_t(u8, params.log_data_size,
MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
params.q_counter = priv->q_counter;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
if (IS_ERR(hp)) {
err = PTR_ERR(hp);
goto create_hairpin_err;
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x log data size %d\n",
hp->tirn, hp->pair->rqn, hp->pair->peer_mdev->priv.name,
hp->pair->sqn, params.log_data_size);
hpe->hp = hp;
hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, peer_ifindex);
attach_flow:
flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
list_add(&flow->hairpin, &hpe->flows);
return 0;
create_hairpin_err:
kfree(hpe);
return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
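/* remember our successor before unlinking: if it is the list head, we were the last flow on this hairpin entry */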
struct list_head *next = flow->hairpin.next;
list_del(&flow->hairpin);
/* no more hairpin flows for us, release the hairpin pair */
if (list_empty(next)) {
struct mlx5e_hairpin_entry *hpe;
hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
hpe->hp->pair->peer_mdev->priv.name);
mlx5e_hairpin_destroy(hpe->hp);
hash_del(&hpe->hairpin_hlist);
kfree(hpe);
}
}
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +432,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flow_tag = attr->flow_tag,
@@ -238,18 +441,33 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
int err;
int err, dest_ix = 0;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.vlan.ft.t;
} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
if (err) {
rule = ERR_PTR(err);
goto err_add_hairpin_flow;
}
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest[dest_ix].tir_num = attr->hairpin_tirn;
} else {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = priv->fs.vlan.ft.t;
}
dest_ix++;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest.counter = counter;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter)) {
rule = ERR_CAST(counter);
goto err_fc_create;
}
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[dest_ix].counter = counter;
dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -292,7 +510,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, &dest, 1);
&flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_add_rule;
@@ -309,7 +527,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
mlx5_fc_destroy(dev, counter);
err_fc_create:
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
return rule;
}
@@ -330,6 +551,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -1422,6 +1646,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
return true;
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
u16 func_id, peer_id;
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
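/* consider devices to be on the same HW if they share PCI bus and slot, ignoring the PCI function */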
func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
return (func_id == peer_id);
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow)
@@ -1466,6 +1704,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
if (is_tcf_mirred_egress_redirect(a)) {
struct net_device *peer_dev = tcf_mirred_dev(a);
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex = peer_dev->ifindex;
flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
peer_dev->name);
return -EINVAL;
}
continue;
}
if (is_tcf_skbedit_mark(a)) {
u32 mark = tcf_skbedit_mark(a);
@@ -2188,6 +2443,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
......
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#define UPLINK_VPORT 0xFFFF
@@ -1123,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_fc *counter = vport->ingress.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int dest_num = 0;
int err = 0;
u8 *smac_v;
@@ -1188,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach drop flow counter */
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
&flow_act, NULL, 0);
&flow_act, dst, dest_num);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
@@ -1210,8 +1224,12 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_fc *counter = vport->egress.drop_counter;
struct mlx5_flow_destination drop_ctr_dst = {0};
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int dest_num = 0;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1262,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
&flow_act, NULL, 0);
&flow_act, dst, dest_num);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
@@ -1457,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
}
}
static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = vport->dev;
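/* the counters are best effort: on allocation failure the drop rules are still installed, just without accounting */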
if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
vport->ingress.drop_counter = mlx5_fc_create(dev, false);
if (IS_ERR(vport->ingress.drop_counter)) {
esw_warn(dev,
"vport[%d] configure ingress drop rule counter failed\n",
vport->vport);
vport->ingress.drop_counter = NULL;
}
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
vport->egress.drop_counter = mlx5_fc_create(dev, false);
if (IS_ERR(vport->egress.drop_counter)) {
esw_warn(dev,
"vport[%d] configure egress drop rule counter failed\n",
vport->vport);
vport->egress.drop_counter = NULL;
}
}
}
static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = vport->dev;
if (vport->ingress.drop_counter)
mlx5_fc_destroy(dev, vport->ingress.drop_counter);
if (vport->egress.drop_counter)
mlx5_fc_destroy(dev, vport->egress.drop_counter);
}
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1483,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
/* create steering drop counters for ingress and egress ACLs */
if (vport_num && esw->mode == SRIOV_LEGACY)
esw_vport_create_drop_counters(vport);
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1521,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
esw_vport_destroy_drop_counters(vport);
}
esw->enabled_vports--;
mutex_unlock(&esw->state_lock);
@@ -2016,12 +2083,36 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
return err;
}
static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
int vport_idx,
struct mlx5_vport_drop_stats *stats)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_vport *vport = &esw->vports[vport_idx];
u64 bytes = 0;
u16 idx = 0;
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
return;
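/* note the viewpoint inversion: eswitch egress drops are packets that never made it to the VF (VF RX drops), eswitch ingress drops are packets the VF tried to transmit (VF TX drops) */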
if (vport->egress.drop_counter) {
idx = vport->egress.drop_counter->id;
mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
}
if (vport->ingress.drop_counter) {
idx = vport->ingress.drop_counter->id;
mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
}
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
@@ -2076,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
vf_stats->rx_dropped = stats.rx_dropped;
vf_stats->tx_dropped = stats.tx_dropped;
free_out:
kvfree(out);
return err;
......
@@ -73,6 +73,7 @@ struct vport_ingress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
};
struct vport_egress {
@@ -81,6 +82,12 @@ struct vport_egress {
struct mlx5_flow_group *drop_grp;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_handle *drop_rule;
struct mlx5_fc *drop_counter;
};
struct mlx5_vport_drop_stats {
u64 rx_dropped;
u64 tx_dropped;
};
struct mlx5_vport_info {
......
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
unsigned long delay);
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval);
int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
u64 *packets, u64 *bytes);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
......
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
}
}
int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
u64 *packets, u64 *bytes)
{
return mlx5_cmd_fc_query(dev, id, packets, bytes);
}
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
......
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
.get_drvinfo = mlx5i_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = mlx5i_get_ts_info,
};
@@ -41,7 +41,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_netdev_ops = {
.ndo_open = mlx5i_open,
@@ -396,7 +395,7 @@ int mlx5i_dev_init(struct net_device *dev)
return 0;
}
static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
......
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
/* Get the net-device corresponding to the given underlay QPN */
struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
/* Shared ndo functionts */
/* Shared ndo functions */
int mlx5i_dev_init(struct net_device *dev);
void mlx5i_dev_cleanup(struct net_device *dev);
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
void mlx5i_init(struct mlx5_core_dev *mdev,
......
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
static int mlx5i_pkey_dev_init(struct net_device *dev);
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_open = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
.ndo_init = mlx5i_pkey_dev_init,
.ndo_uninit = mlx5i_pkey_dev_cleanup,
.ndo_change_mtu = mlx5i_pkey_change_mtu,
.ndo_do_ioctl = mlx5i_pkey_ioctl,
};
/* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
return mlx5i_dev_init(dev);
}
static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
return mlx5i_ioctl(dev, ifr, cmd);
}
static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
{
return mlx5i_dev_cleanup(netdev);
......
@@ -398,3 +398,187 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
struct mlx5_hairpin_params *params, u32 *rqn)
{
u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
void *rqc, *wq;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(rqc, rqc, hairpin, 1);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
}
static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
struct mlx5_hairpin_params *params, u32 *sqn)
{
u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
void *sqc, *wq;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(sqc, sqc, hairpin, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
}
static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
struct mlx5_hairpin_params *params)
{
int err;
err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn);
if (err)
goto out_err_rq;
err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn);
if (err)
goto out_err_sq;
return 0;
out_err_sq:
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
out_err_rq:
return err;
}
static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
{
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn);
}
static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_sq)
{
u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
void *rqc;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
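/* the peer SQ is specified only on the RST to RDY transition */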
if (next_state == MLX5_RQC_STATE_RDY) {
MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
}
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
return mlx5_core_modify_rq(func_mdev, rqn,
in, MLX5_ST_SZ_BYTES(modify_rq_in));
}
static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_rq)
{
u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
void *sqc;
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
if (next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
}
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
return mlx5_core_modify_sq(peer_mdev, sqn,
in, MLX5_ST_SZ_BYTES(modify_sq_in));
}
static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
{
int err;
/* set peer SQ */
err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn,
MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn);
if (err)
goto err_modify_sq;
/* set func RQ */
err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn,
MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn);
if (err)
goto err_modify_rq;
return 0;
err_modify_rq:
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
err_modify_sq:
return err;
}
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
/* unset func RQ */
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
/* unset peer SQ */
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
}
struct mlx5_hairpin *
mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_core_dev *peer_mdev,
struct mlx5_hairpin_params *params)
{
struct mlx5_hairpin *hp;
int size, err;
size = sizeof(*hp);
hp = kzalloc(size, GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
hp->func_mdev = func_mdev;
hp->peer_mdev = peer_mdev;
/* alloc and pair func --> peer hairpin */
err = mlx5_hairpin_create_queues(hp, params);
if (err)
goto err_create_queues;
err = mlx5_hairpin_pair_queues(hp);
if (err)
goto err_pair_queues;
return hp;
err_pair_queues:
mlx5_hairpin_destroy_queues(hp);
err_create_queues:
kfree(hp);
return ERR_PTR(err);
}
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
{
mlx5_hairpin_unpair_queues(hp);
mlx5_hairpin_destroy_queues(hp);
kfree(hp);
}
@@ -13,6 +13,8 @@ struct ifla_vf_stats {
__u64 tx_bytes;
__u64 broadcast;
__u64 multicast;
__u64 rx_dropped;
__u64 tx_dropped;
};
struct ifla_vf_info {
......
@@ -794,7 +794,10 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x80];
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];
u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
@@ -1023,12 +1026,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
u8 reserved_at_3c0[0x1b];
u8 hairpin[0x1];
u8 reserved_at_3c1[0x2];
u8 log_max_hairpin_queues[0x5];
u8 reserved_at_3c8[0x3];
u8 log_max_hairpin_wq_data_sz[0x5];
u8 reserved_at_3d0[0xb];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
u8 disable_local_lb[0x1];
u8 reserved_at_3e2[0x9];
u8 reserved_at_3e2[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
u8 reserved_at_3e8[0x3];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1162,7 +1172,10 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
u8 reserved_at_120[0x15];
u8 reserved_at_120[0xb];
u8 log_hairpin_data_sz[0x5];
u8 reserved_at_130[0x5];
u8 log_wqe_num_of_strides[0x3];
u8 two_byte_shift_en[0x1];
u8 reserved_at_139[0x4];
@@ -2482,7 +2495,8 @@ struct mlx5_ifc_sqc_bits {
u8 state[0x4];
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 reserved_at_e[0x12];
u8 hairpin[0x1];
u8 reserved_at_f[0x11];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2490,7 +2504,13 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_40[0x8];
u8 cqn[0x18];
u8 reserved_at_60[0x90];
u8 reserved_at_60[0x8];
u8 hairpin_peer_rq[0x18];
u8 reserved_at_80[0x10];
u8 hairpin_peer_vhca[0x10];
u8 reserved_at_a0[0x50];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
@@ -2562,7 +2582,8 @@ struct mlx5_ifc_rqc_bits {
u8 state[0x4];
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
u8 reserved_at_e[0x12];
u8 hairpin[0x1];
u8 reserved_at_f[0x11];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2576,7 +2597,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_80[0x8];
u8 rmpn[0x18];
u8 reserved_at_a0[0xe0];
u8 reserved_at_a0[0x8];
u8 hairpin_peer_sq[0x18];
u8 reserved_at_c0[0x10];
u8 hairpin_peer_vhca[0x10];
u8 reserved_at_e0[0xa0];
struct mlx5_ifc_wq_bits wq;
};
......
@@ -75,4 +75,23 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen);
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
struct mlx5_hairpin_params {
u8 log_data_size;
u16 q_counter;
};
struct mlx5_hairpin {
struct mlx5_core_dev *func_mdev;
struct mlx5_core_dev *peer_mdev;
u32 rqn;
u32 sqn;
};
struct mlx5_hairpin *
mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_core_dev *peer_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
#endif /* __TRANSOBJ_H__ */
@@ -732,6 +732,8 @@ enum {
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
IFLA_VF_STATS_PAD,
IFLA_VF_STATS_RX_DROPPED,
IFLA_VF_STATS_TX_DROPPED,
__IFLA_VF_STATS_MAX,
};
......
@@ -904,6 +904,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_MULTICAST */
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_RX_DROPPED */
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_DROPPED */
nla_total_size_64bit(sizeof(__u64)) +
nla_total_size(sizeof(struct ifla_vf_trust)));
return size;
} else
@@ -1258,7 +1262,11 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
vf_stats.multicast, IFLA_VF_STATS_PAD)) {
vf_stats.multicast, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
nla_nest_cancel(skb, vfstats);
goto nla_put_vf_failure;
}
......