Commit 8d0f7d3a authored by Petr Machata, committed by David S. Miller

mlxsw: Add support for IFLA_OFFLOAD_XSTATS_L3_STATS

Spectrum machines support L3 stats by binding a counter to a RIF, a
hardware object representing a router interface. Recognize the netdevice
notifier events, NETDEV_OFFLOAD_XSTATS_*, to support enablement,
disablement, and reporting back to core.

As a netdevice gains a RIF, if L3 stats are enabled, install the counters,
and ping the core so that a userspace notification can be emitted.

Similarly, as a netdevice loses a RIF, push the as-yet-unreported
statistics to the core, so that they are not lost, and ping the core to
emit userspace notification.
Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c1de13f9
......@@ -4829,6 +4829,10 @@ static bool mlxsw_sp_netdevice_event_is_router(unsigned long event)
case NETDEV_PRE_CHANGEADDR:
case NETDEV_CHANGEADDR:
case NETDEV_CHANGEMTU:
case NETDEV_OFFLOAD_XSTATS_ENABLE:
case NETDEV_OFFLOAD_XSTATS_DISABLE:
case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
return true;
default:
return false;
......
......@@ -225,6 +225,64 @@ int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
/* Snapshot of the basic RIF counter set as read from the RICNT register.
 * Field names match the mlxsw_reg_ricnt_*_get() accessors they are filled
 * from in mlxsw_sp_rif_counter_fetch_clear().
 */
struct mlxsw_sp_rif_counter_set_basic {
	/* Packets delivered without error, split by destination MAC class. */
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	/* Byte counterparts of the good packet counters above. */
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	/* Packets dropped due to errors vs. deliberately discarded. */
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};
/* Read the basic counter set bound to @rif in direction @dir, atomically
 * clearing the hardware counters in the same query. When @set is NULL, the
 * readout still happens (and thus clears the counters), but the values are
 * thrown away.
 *
 * Returns 0 on success, -EINVAL when no valid counter is bound for @dir, or
 * the error from the register query.
 */
static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	/* CLEAR opcode makes the fetch destructive: counters restart at zero. */
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (set) {
		set->good_unicast_packets =
			mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
		set->good_multicast_packets =
			mlxsw_reg_ricnt_good_multicast_packets_get(ricnt_pl);
		set->good_broadcast_packets =
			mlxsw_reg_ricnt_good_broadcast_packets_get(ricnt_pl);
		set->good_unicast_bytes =
			mlxsw_reg_ricnt_good_unicast_bytes_get(ricnt_pl);
		set->good_multicast_bytes =
			mlxsw_reg_ricnt_good_multicast_bytes_get(ricnt_pl);
		set->good_broadcast_bytes =
			mlxsw_reg_ricnt_good_broadcast_bytes_get(ricnt_pl);
		set->error_packets = mlxsw_reg_ricnt_error_packets_get(ricnt_pl);
		set->discard_packets =
			mlxsw_reg_ricnt_discard_packets_get(ricnt_pl);
		set->error_bytes = mlxsw_reg_ricnt_error_bytes_get(ricnt_pl);
		set->discard_bytes = mlxsw_reg_ricnt_discard_bytes_get(ricnt_pl);
	}

	return 0;
}
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
......@@ -242,9 +300,13 @@ int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
unsigned int *p_counter_index;
int err;
if (mlxsw_sp_rif_counter_valid_get(rif, dir))
return 0;
p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
if (!p_counter_index)
return -EINVAL;
err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
p_counter_index);
if (err)
......@@ -8146,6 +8208,166 @@ u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
return lb_rif->ul_rif_id;
}
static bool
mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
{
return mlxsw_sp_rif_counter_valid_get(rif,
MLXSW_SP_RIF_COUNTER_EGRESS) &&
mlxsw_sp_rif_counter_valid_get(rif,
MLXSW_SP_RIF_COUNTER_INGRESS);
}
/* Bind ingress and egress counters to @rif and wipe any stale values they
 * may carry from a previous binding. On failure, any counter allocated so
 * far is released again.
 *
 * Returns 0 on success or a negative error code.
 */
static int
mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	if (err)
		return err;

	/* Destructive read discards whatever the counter accumulated before. */
	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       NULL);
	if (err)
		goto err_free_ingress;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		goto err_free_ingress;

	/* Same stale-data wipe for the egress direction. */
	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       NULL);
	if (err)
		goto err_free_egress;

	return 0;

err_free_egress:
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
err_free_ingress:
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	return err;
}
static void
mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
{
mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
}
/* Answer the core's REPORT_USED query: tell it this device contributes to
 * the L3 stats, but only if the counters are actually installed on the RIF.
 */
static void
mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
					  struct netdev_notifier_offload_xstats_info *info)
{
	if (mlxsw_sp_router_port_l3_stats_enabled(rif))
		netdev_offload_xstats_report_used(info->report_used);
}
/* Destructively read both directions of the RIF counters and translate the
 * device counter set into the rtnl_hw_stats64 layout expected by the core.
 *
 * Returns 0 on success or the error from the counter readout.
 */
static int
mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
				    struct rtnl_hw_stats64 *p_stats)
{
	struct mlxsw_sp_rif_counter_set_basic ingress;
	struct mlxsw_sp_rif_counter_set_basic egress;
	int err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_INGRESS,
					       &ingress);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_fetch_clear(rif,
					       MLXSW_SP_RIF_COUNTER_EGRESS,
					       &egress);
	if (err)
		return err;

	/* "Good" traffic is the sum of the unicast, multicast and broadcast
	 * buckets; errors and discards are reported separately below.
	 */
	p_stats->rx_packets = ingress.good_unicast_packets +
			      ingress.good_multicast_packets +
			      ingress.good_broadcast_packets;
	p_stats->tx_packets = egress.good_unicast_packets +
			      egress.good_multicast_packets +
			      egress.good_broadcast_packets;
	p_stats->rx_bytes = ingress.good_unicast_bytes +
			    ingress.good_multicast_bytes +
			    ingress.good_broadcast_bytes;
	p_stats->tx_bytes = egress.good_unicast_bytes +
			    egress.good_multicast_bytes +
			    egress.good_broadcast_bytes;
	p_stats->rx_errors = ingress.error_packets;
	p_stats->tx_errors = egress.error_packets;
	p_stats->rx_dropped = ingress.discard_packets;
	p_stats->tx_dropped = egress.discard_packets;
	/* The multicast field also folds in broadcast, matching the original
	 * accounting of this driver.
	 */
	p_stats->multicast = ingress.good_multicast_packets +
			     ingress.good_broadcast_packets;

	return 0;
}
/* Answer the core's REPORT_DELTA query: fetch-and-clear the HW counters and
 * hand the accumulated delta to the core. A RIF without installed counters
 * simply has nothing to report.
 *
 * Returns 0 on success or the error from the counter fetch.
 */
static int
mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
					   struct netdev_notifier_offload_xstats_info *info)
{
	struct rtnl_hw_stats64 stats = {};
	int err;

	if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
		return 0;

	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
	if (!err)
		netdev_offload_xstats_report_delta(info->report_delta, &stats);
	return err;
}
/* Deferred-work context for emitting a hw-stats netlink notification for
 * @dev outside of the notifier/router-lock context (see
 * mlxsw_sp_router_hwstats_notify_schedule()). The work owns a reference on
 * @dev, dropped by the work function.
 */
struct mlxsw_sp_router_hwstats_notify_work {
	struct work_struct work;
	struct net_device *dev;
};
/* Work function: emit the offload-xstats netlink notification under RTNL,
 * then release the device reference and the work item taken by
 * mlxsw_sp_router_hwstats_notify_schedule().
 */
static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_router_hwstats_notify_work *nwork;
	struct net_device *dev;

	nwork = container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
			     work);
	dev = nwork->dev;

	rtnl_lock();
	rtnl_offload_xstats_notify(dev);
	rtnl_unlock();

	dev_put(dev);
	kfree(nwork);
}
/* Queue a deferred hw-stats notification for @dev.
 *
 * To collect notification payload, the core ends up sending another
 * notifier block message, which would deadlock on the attempt to acquire
 * the router lock again. Just postpone the notification until later.
 *
 * Allocation failure is swallowed: the notification is best-effort and the
 * stats themselves are unaffected.
 */
static void
mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
{
	struct mlxsw_sp_router_hwstats_notify_work *nwork;

	nwork = kzalloc(sizeof(*nwork), GFP_KERNEL);
	if (!nwork)
		return;

	/* The work item holds a device reference until it runs. */
	dev_hold(dev);
	nwork->dev = dev;
	INIT_WORK(&nwork->work, mlxsw_sp_router_hwstats_notify_work);
	mlxsw_core_schedule_work(&nwork->work);
}
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
return rif->dev->ifindex;
......@@ -8156,6 +8378,16 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
return rif->dev;
}
/* Flush the as-yet-unreported L3 stats of @rif into the core's accumulator
 * so they survive the RIF teardown. Best-effort: a failed fetch simply
 * pushes nothing.
 */
static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
{
	struct rtnl_hw_stats64 stats = {};
	int err;

	err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
	if (err)
		return;

	netdev_offload_xstats_push_delta(rif->dev,
					 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					 &stats);
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
......@@ -8216,10 +8448,19 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
mlxsw_sp_rif_counters_alloc(rif);
if (netdev_offload_xstats_enabled(rif->dev,
NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
err = mlxsw_sp_router_port_l3_stats_enable(rif);
if (err)
goto err_stats_enable;
mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
} else {
mlxsw_sp_rif_counters_alloc(rif);
}
return rif;
err_stats_enable:
err_mr_rif_add:
for (i--; i >= 0; i--)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
......@@ -8249,7 +8490,15 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
mlxsw_sp_rif_counters_free(rif);
if (netdev_offload_xstats_enabled(rif->dev,
NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
mlxsw_sp_rif_push_l3_stats(rif);
mlxsw_sp_router_port_l3_stats_disable(rif);
mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
} else {
mlxsw_sp_rif_counters_free(rif);
}
for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
......@@ -9126,6 +9375,35 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
return -ENOBUFS;
}
/* Dispatch a NETDEV_OFFLOAD_XSTATS_* notifier event for @rif.
 *
 * Only the L3 stats type is backed by RIF counters; other types are
 * acknowledged with 0 so the core does not treat them as errors.
 *
 * Returns 0 or a negative error from the enable/report handlers.
 */
static int
mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
					unsigned long event,
					struct netdev_notifier_offload_xstats_info *info)
{
	if (info->type != NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return 0;

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
		return mlxsw_sp_router_port_l3_stats_enable(rif);
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
		mlxsw_sp_router_port_l3_stats_disable(rif);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
	default:
		/* Caller routes only the events above; anything else is a bug. */
		WARN_ON_ONCE(1);
		return 0;
	}
}
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr)
{
......@@ -9151,6 +9429,15 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
case NETDEV_PRE_CHANGEADDR:
err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
break;
case NETDEV_OFFLOAD_XSTATS_ENABLE:
case NETDEV_OFFLOAD_XSTATS_DISABLE:
case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, ptr);
break;
default:
WARN_ON_ONCE(1);
break;
}
out:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment