Commit 9309f97a authored by Petr Machata's avatar Petr Machata Committed by David S. Miller

net: dev: Add hardware stats support

Offloading switch device drivers may be able to collect statistics of the
traffic taking place in the HW datapath that pertains to a certain soft
netdevice, such as VLAN. Add the necessary infrastructure to allow exposing
these statistics to the offloaded netdevice in question. The API was shaped
by the following considerations:

- Collection of HW statistics is not free: there may be a finite number of
  counters, and the act of counting may have a performance impact. It is
  therefore necessary to allow toggling whether HW counting should be done
  for any particular SW netdevice.

- As the drivers are loaded and removed, a particular device may get
  offloaded and unoffloaded again. At the same time, the statistics values
  need to stay monotonic (modulo the eventual 64-bit wraparound),
  increasing only to reflect traffic measured in the device.

  To that end, the netdevice keeps around a lazily-allocated copy of struct
  rtnl_link_stats64. Device drivers then contribute to the values kept
  therein at various points. Even as the driver goes away, the struct stays
  around to maintain the statistics values.

- Different HW devices may be able to count different things. The
  motivation behind this patch in particular is exposure of HW counters on
  Nvidia Spectrum switches, where the only practical approach to counting
  traffic on offloaded soft netdevices currently is to use router interface
  counters, and count L3 traffic. Correspondingly that is the statistics
  suite added in this patch.

  Other devices may be able to measure different kinds of traffic, and for
  that reason, the APIs are built to allow uniform access to different
  statistics suites.

- Because soft netdevices and offloading drivers are only loosely bound, a
  netdevice uses a notifier chain to communicate with the drivers. Several
  new notifiers, NETDEV_OFFLOAD_XSTATS_*, have been added to carry messages
  to the offloading drivers.

- Devices can have various conditions for when a particular counter is
  available. As the device is configured and reconfigured, the device
  offload may become or cease being suitable for counter binding. A
  netdevice can use a notifier type NETDEV_OFFLOAD_XSTATS_REPORT_USED to
  ping offloading drivers and determine whether anyone currently implements
  a given statistics suite. This information can then be propagated to user
  space.

  When the driver decides to unoffload a netdevice, it can use a
  newly-added function, netdev_offload_xstats_report_delta(), to record
  outstanding collected statistics, before destroying the HW counter.

This patch adds a helper, call_netdevice_notifiers_info_robust(), for
dispatching a notifier with the possibility of unwind when one of the
consumers bails. Given the wish to eventually get rid of the global
notifier block altogether, this helper only invokes the per-netns notifier
block.
Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 216e6906
...@@ -1950,6 +1950,7 @@ enum netdev_ml_priv_type { ...@@ -1950,6 +1950,7 @@ enum netdev_ml_priv_type {
* @watchdog_dev_tracker: refcount tracker used by watchdog. * @watchdog_dev_tracker: refcount tracker used by watchdog.
* @dev_registered_tracker: tracker for reference held while * @dev_registered_tracker: tracker for reference held while
* registered * registered
* @offload_xstats_l3: L3 HW stats for this netdevice.
* *
* FIXME: cleanup struct net_device such that network protocol info * FIXME: cleanup struct net_device such that network protocol info
* moves out. * moves out.
...@@ -2287,6 +2288,7 @@ struct net_device { ...@@ -2287,6 +2288,7 @@ struct net_device {
netdevice_tracker linkwatch_dev_tracker; netdevice_tracker linkwatch_dev_tracker;
netdevice_tracker watchdog_dev_tracker; netdevice_tracker watchdog_dev_tracker;
netdevice_tracker dev_registered_tracker; netdevice_tracker dev_registered_tracker;
struct rtnl_hw_stats64 *offload_xstats_l3;
}; };
#define to_net_dev(d) container_of(d, struct net_device, dev) #define to_net_dev(d) container_of(d, struct net_device, dev)
...@@ -2727,6 +2729,10 @@ enum netdev_cmd { ...@@ -2727,6 +2729,10 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO, NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO, NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO, NETDEV_SVLAN_FILTER_DROP_INFO,
NETDEV_OFFLOAD_XSTATS_ENABLE,
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
}; };
const char *netdev_cmd_to_name(enum netdev_cmd cmd); const char *netdev_cmd_to_name(enum netdev_cmd cmd);
...@@ -2777,6 +2783,42 @@ struct netdev_notifier_pre_changeaddr_info { ...@@ -2777,6 +2783,42 @@ struct netdev_notifier_pre_changeaddr_info {
const unsigned char *dev_addr; const unsigned char *dev_addr;
}; };
/* Kinds of HW-statistics suites a device may collect for a soft netdevice.
 * L3 (router interface) counters are the only suite defined so far.
 */
enum netdev_offload_xstats_type {
	NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
};
/* Payload of the NETDEV_OFFLOAD_XSTATS_* notifiers. Which union member is
 * valid depends on the notifier type being dispatched.
 */
struct netdev_notifier_offload_xstats_info {
	struct netdev_notifier_info info; /* must be first */
	enum netdev_offload_xstats_type type;

	union {
		/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
		struct netdev_notifier_offload_xstats_rd *report_delta;
		/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
		struct netdev_notifier_offload_xstats_ru *report_used;
	};
};
/* Lazily allocate storage for HW stats of @type on @dev and notify
 * offloading drivers to start counting. Takes effect under RTNL.
 */
int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack);
/* Notify drivers to stop counting and free the cached stats of @type. */
int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type);
/* True iff collection of @type stats has been enabled on @dev. */
bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type);
/* Collect outstanding deltas from drivers; if @stats is non-NULL, also
 * return the accumulated totals. @used reports whether any driver
 * currently implements the suite.
 */
int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *stats, bool *used,
			      struct netlink_ext_ack *extack);
/* For drivers: respond to a REPORT_DELTA notifier with collected stats. */
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
				   const struct rtnl_hw_stats64 *stats);
/* For drivers: respond to a REPORT_USED notifier. */
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
/* For drivers: fold final stats into the netdevice cache, e.g. when
 * unoffloading a netdevice and destroying the HW counter.
 */
void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *stats);
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev) struct net_device *dev)
{ {
......
...@@ -245,6 +245,21 @@ struct rtnl_link_stats64 { ...@@ -245,6 +245,21 @@ struct rtnl_link_stats64 {
__u64 rx_nohandler; __u64 rx_nohandler;
}; };
/* Subset of link stats useful for in-HW collection. Meaning of the fields is as
 * for struct rtnl_link_stats64. UAPI: layout must stay stable.
 */
struct rtnl_hw_stats64 {
	__u64 rx_packets;
	__u64 tx_packets;
	__u64 rx_bytes;
	__u64 tx_bytes;
	__u64 rx_errors;
	__u64 tx_errors;
	__u64 rx_dropped;
	__u64 tx_dropped;
	__u64 multicast;
};
/* The struct should be in sync with struct ifmap */ /* The struct should be in sync with struct ifmap */
struct rtnl_link_ifmap { struct rtnl_link_ifmap {
__u64 mem_start; __u64 mem_start;
......
...@@ -1622,7 +1622,8 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd) ...@@ -1622,7 +1622,8 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
N(PRE_CHANGEADDR) N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
} }
#undef N #undef N
return "UNKNOWN_NETDEV_EVENT"; return "UNKNOWN_NETDEV_EVENT";
...@@ -1939,6 +1940,32 @@ static int call_netdevice_notifiers_info(unsigned long val, ...@@ -1939,6 +1940,32 @@ static int call_netdevice_notifiers_info(unsigned long val,
return raw_notifier_call_chain(&netdev_chain, val, info); return raw_notifier_call_chain(&netdev_chain, val, info);
} }
/**
 *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
 *	                                       with rollback on error
 *	@val_up: value passed unmodified to notifier function
 *	@val_down: value passed unmodified to the notifier function when
 *	           recovering from an error on @val_up
 *	@info: notifier information data
 *
 *	Call all per-netns network notifier blocks, but not notifier blocks on
 *	the global notifier chain. Parameters and return value are as for
 *	raw_notifier_call_chain_robust().
 */

static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
				     unsigned long val_down,
				     struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);

	ASSERT_RTNL();

	return raw_notifier_call_chain_robust(&net->netdev_chain,
					      val_up, val_down, info);
}
static int call_netdevice_notifiers_extack(unsigned long val, static int call_netdevice_notifiers_extack(unsigned long val,
struct net_device *dev, struct net_device *dev,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
...@@ -7728,6 +7755,242 @@ void netdev_bonding_info_change(struct net_device *dev, ...@@ -7728,6 +7755,242 @@ void netdev_bonding_info_change(struct net_device *dev,
} }
EXPORT_SYMBOL(netdev_bonding_info_change); EXPORT_SYMBOL(netdev_bonding_info_change);
/* Allocate the cached L3 stats for @dev and ask offloading drivers to start
 * counting. The robust notifier dispatch unwinds already-notified drivers
 * (via NETDEV_OFFLOAD_XSTATS_DISABLE) if one of them fails, in which case
 * the cache is released again.
 */
static int netdev_offload_xstats_enable_l3(struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
	};
	int notifier_rc;
	int err;

	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
					 GFP_KERNEL);
	if (!dev->offload_xstats_l3)
		return -ENOMEM;

	notifier_rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
							   NETDEV_OFFLOAD_XSTATS_DISABLE,
							   &info.info);
	err = notifier_to_errno(notifier_rc);
	if (!err)
		return 0;

	kfree(dev->offload_xstats_l3);
	dev->offload_xstats_l3 = NULL;
	return err;
}
/* Enable in-HW collection of @type statistics on @dev. Returns -EALREADY
 * if the suite is already enabled. Must be called under RTNL.
 */
int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	if (netdev_offload_xstats_enabled(dev, type))
		return -EALREADY;

	if (type == NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return netdev_offload_xstats_enable_l3(dev, extack);

	/* Unknown suite type: programming error on the caller's side. */
	WARN_ON(1);
	return -EINVAL;
}
EXPORT_SYMBOL(netdev_offload_xstats_enable);
/* Tell offloading drivers to stop counting L3 traffic for @dev and release
 * the cached counter values.
 */
static void netdev_offload_xstats_disable_l3(struct net_device *dev)
{
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
	};
	struct rtnl_hw_stats64 *l3_stats = dev->offload_xstats_l3;

	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
				      &info.info);

	dev->offload_xstats_l3 = NULL;
	kfree(l3_stats);
}
/* Disable in-HW collection of @type statistics on @dev. Returns -EALREADY
 * if the suite is not currently enabled. Must be called under RTNL.
 */
int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type)
{
	ASSERT_RTNL();

	if (!netdev_offload_xstats_enabled(dev, type))
		return -EALREADY;

	if (type == NETDEV_OFFLOAD_XSTATS_TYPE_L3) {
		netdev_offload_xstats_disable_l3(dev);
		return 0;
	}

	/* Unknown suite type: programming error on the caller's side. */
	WARN_ON(1);
	return -EINVAL;
}
EXPORT_SYMBOL(netdev_offload_xstats_disable);
/* Tear down every HW-stats suite on @dev; invoked from netdevice
 * unregistration. L3 is currently the only suite.
 */
static void netdev_offload_xstats_disable_all(struct net_device *dev)
{
	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
}
/* Map a statistics-suite type to the netdevice's cached counter storage.
 * Returns NULL (after warning) for an unknown type, and NULL without a
 * warning simply means the suite is not enabled.
 */
static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
			      enum netdev_offload_xstats_type type)
{
	if (type == NETDEV_OFFLOAD_XSTATS_TYPE_L3)
		return dev->offload_xstats_l3;

	WARN_ON(1);
	return NULL;
}
bool netdev_offload_xstats_enabled(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{
ASSERT_RTNL();
return netdev_offload_xstats_get_ptr(dev, type);
}
EXPORT_SYMBOL(netdev_offload_xstats_enabled);
/* Reply buffer for NETDEV_OFFLOAD_XSTATS_REPORT_USED: a driver sets @used
 * (via netdev_offload_xstats_report_used()) if it implements the suite.
 */
struct netdev_notifier_offload_xstats_ru {
	bool used;
};
/* Reply buffer for NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: drivers accumulate
 * outstanding counter deltas into @stats and mark @used.
 */
struct netdev_notifier_offload_xstats_rd {
	struct rtnl_hw_stats64 stats;
	bool used;
};
/* Field-wise accumulate @src into @dest. Counters wrap modulo 2^64. */
static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
				  const struct rtnl_hw_stats64 *src)
{
	dest->rx_packets   += src->rx_packets;
	dest->tx_packets   += src->tx_packets;
	dest->rx_bytes     += src->rx_bytes;
	dest->tx_bytes     += src->tx_bytes;
	dest->rx_errors    += src->rx_errors;
	dest->tx_errors    += src->tx_errors;
	dest->rx_dropped   += src->rx_dropped;
	dest->tx_dropped   += src->tx_dropped;
	dest->multicast    += src->multicast;
}
/* Ping offloading drivers with REPORT_USED to learn whether anyone
 * currently implements the @type suite for @dev; result lands in *p_used.
 */
static int netdev_offload_xstats_get_used(struct net_device *dev,
					  enum netdev_offload_xstats_type type,
					  bool *p_used,
					  struct netlink_ext_ack *extack)
{
	struct netdev_notifier_offload_xstats_ru ru = {};
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.type = type,
		.report_used = &ru,
	};
	int notifier_rc;

	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
	notifier_rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
						    &info.info);
	*p_used = ru.used;
	return notifier_to_errno(notifier_rc);
}
/* Ask offloading drivers (via REPORT_DELTA) for counter deltas collected
 * since the last query, fold them into the netdevice's cached totals, and
 * copy the totals out to *p_stats. *p_used says whether any driver
 * responded. Returns a negative errno from the notifier chain, if any.
 */
static int netdev_offload_xstats_get_stats(struct net_device *dev,
					   enum netdev_offload_xstats_type type,
					   struct rtnl_hw_stats64 *p_stats,
					   bool *p_used,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_offload_xstats_rd report_delta = {};
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.type = type,
		.report_delta = &report_delta,
	};
	struct rtnl_hw_stats64 *stats;
	int rc;

	/* NULL here means the suite was never enabled for @dev. */
	stats = netdev_offload_xstats_get_ptr(dev, type);
	if (WARN_ON(!stats))
		return -EINVAL;

	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
					   &info.info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	netdev_hw_stats64_add(stats, &report_delta.stats);
	if (p_stats)
		*p_stats = *stats;
	*p_used = report_delta.used;

	return notifier_to_errno(rc);
}
/* Retrieve HW statistics of @type for @dev. With a NULL @p_stats only the
 * "is anyone implementing this suite" query is made; otherwise outstanding
 * deltas are collected and the accumulated totals returned as well. Must
 * be called under RTNL.
 */
int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
			      struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	if (!p_stats)
		return netdev_offload_xstats_get_used(dev, type, p_used,
						      extack);
	return netdev_offload_xstats_get_stats(dev, type, p_stats, p_used,
					       extack);
}
EXPORT_SYMBOL(netdev_offload_xstats_get);
/* Driver-side helper for NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: fold @stats
 * into the reply buffer and mark the suite as implemented.
 */
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
				   const struct rtnl_hw_stats64 *stats)
{
	report_delta->used = true;
	netdev_hw_stats64_add(&report_delta->stats, stats);
}
EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
/* Driver-side helper for NETDEV_OFFLOAD_XSTATS_REPORT_USED: mark the
 * queried suite as implemented by this driver.
 */
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
{
	report_used->used = true;
}
EXPORT_SYMBOL(netdev_offload_xstats_report_used);
/* Fold @p_stats into the cached @type counters of @dev. Intended for a
 * driver that is about to unoffload the netdevice and destroy its HW
 * counter, so the final readings are not lost. Must be called under RTNL.
 */
void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *p_stats)
{
	struct rtnl_hw_stats64 *dest;

	ASSERT_RTNL();

	dest = netdev_offload_xstats_get_ptr(dev, type);
	if (WARN_ON(!dest))
		return;

	netdev_hw_stats64_add(dest, p_stats);
}
EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
/** /**
* netdev_get_xmit_slave - Get the xmit slave of master device * netdev_get_xmit_slave - Get the xmit slave of master device
* @dev: device * @dev: device
...@@ -10417,6 +10680,8 @@ void unregister_netdevice_many(struct list_head *head) ...@@ -10417,6 +10680,8 @@ void unregister_netdevice_many(struct list_head *head)
dev_xdp_uninstall(dev); dev_xdp_uninstall(dev);
netdev_offload_xstats_disable_all(dev);
/* Notify protocols, that we are about to destroy /* Notify protocols, that we are about to destroy
* this device. They should clean all the things. * this device. They should clean all the things.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment