Commit e5a28026 authored by Jakub Kicinski

Merge branch 'add-tx-stop-wake-counters'

Daniel Jurgens says:

====================
Add TX stop/wake counters

Several drivers provide TX stop and wake counters via ethtool stats. Add
those to the netdev queue stats, and use them in virtio_net.
====================

Link: https://lore.kernel.org/r/20240510201927.1821109-1-danielj@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents c084ebd7 c39add9b
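For orientation before the diffs: once a driver fills the new stop/wake fields of struct netdev_queue_stats_tx, the core emits them as per-queue qstats over the netdev netlink family. Below is a minimal, hedged sketch of the driver side; every foo_* name and field is a hypothetical placeholder, while struct netdev_queue_stats_tx, struct netdev_stat_ops and the get_queue_stats_tx callback are the real kernel interfaces this series touches.

/* Hedged sketch: a hypothetical driver reporting the new per-queue TX
 * stop/wake counters through the generic queue-stats callbacks. All
 * foo_* identifiers are illustrative; the callback signature and the
 * netdev_queue_stats_tx fields match include/net/netdev_queues.h.
 */
#include <linux/netdevice.h>
#include <net/netdev_queues.h>

struct foo_tx_ring {
	u64 packets;
	u64 bytes;
	u64 stop_events;	/* queue stopped because the ring was full */
	u64 wake_events;	/* queue restarted after completions freed space */
};

struct foo_priv {
	struct foo_tx_ring *tx_rings;
};

static void foo_get_queue_stats_tx(struct net_device *dev, int idx,
				   struct netdev_queue_stats_tx *stats)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct foo_tx_ring *ring = &priv->tx_rings[idx];

	stats->packets = ring->packets;
	stats->bytes = ring->bytes;
	stats->stop = ring->stop_events;	/* new in this series */
	stats->wake = ring->wake_events;	/* new in this series */
}

static const struct netdev_stat_ops foo_stat_ops = {
	.get_queue_stats_tx = foo_get_queue_stats_tx,
	/* .get_queue_stats_rx and .get_base_stats omitted for brevity */
};

/* During probe: dev->stat_ops = &foo_stat_ops; */

With that wired up, userspace sees tx-stop and tx-wake in the per-queue qstats dump, alongside the existing counters.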
@@ -439,6 +439,20 @@ attribute-sets:
           Number of the packets dropped by the device due to the transmit
           packets bitrate exceeding the device rate limit.
         type: uint
+      -
+        name: tx-stop
+        doc: |
+          Number of times driver paused accepting new tx packets
+          from the stack to this queue, because the queue was full.
+          Note that if BQL is supported and enabled on the device
+          the networking stack will avoid queuing a lot of data at once.
+        type: uint
+      -
+        name: tx-wake
+        doc: |
+          Number of times driver re-started accepting send
+          requests to this queue from the stack.
+        type: uint

 operations:
   list:
......
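The accounting these two attributes describe lives in the driver's transmit and completion paths: stop is bumped when the driver pauses the queue because descriptors ran out, wake when it restarts the queue. A condensed, hypothetical sketch of that pattern follows (the virtio_net hunks below implement the same idea with u64_stats_t counters); the foo_* identifiers are placeholders, while the netif_* helpers and MAX_SKB_FRAGS are real kernel APIs.

/* Hypothetical stop/wake bookkeeping matching the attribute docs above.
 * foo_ring_free_slots() stands in for however a driver tracks free TX
 * descriptors; the counters would be reported as tx-stop / tx-wake.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_tx_ring {
	u16 index;		/* queue index */
	u64 stop_events;	/* qstats tx-stop */
	u64 wake_events;	/* qstats tx-wake */
};

unsigned int foo_ring_free_slots(const struct foo_tx_ring *ring);

/* Transmit path: no room for another worst-case skb, pause the queue. */
static void foo_maybe_stop_queue(struct net_device *dev, struct foo_tx_ring *ring)
{
	if (foo_ring_free_slots(ring) < MAX_SKB_FRAGS + 2) {
		netif_stop_subqueue(dev, ring->index);
		ring->stop_events++;
	}
}

/* Completion path: descriptors were reclaimed, restart the queue. */
static void foo_maybe_wake_queue(struct net_device *dev, struct foo_tx_ring *ring)
{
	if (__netif_subqueue_stopped(dev, ring->index) &&
	    foo_ring_free_slots(ring) >= MAX_SKB_FRAGS + 2) {
		netif_wake_subqueue(dev, ring->index);
		ring->wake_events++;
	}
}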
@@ -95,6 +95,8 @@ struct virtnet_sq_stats {
 	u64_stats_t xdp_tx_drops;
 	u64_stats_t kicks;
 	u64_stats_t tx_timeouts;
+	u64_stats_t stop;
+	u64_stats_t wake;
 };

 struct virtnet_rq_stats {
@@ -145,6 +147,8 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
 	VIRTNET_SQ_STAT_QSTAT("packets", packets),
 	VIRTNET_SQ_STAT_QSTAT("bytes", bytes),
+	VIRTNET_SQ_STAT_QSTAT("stop", stop),
+	VIRTNET_SQ_STAT_QSTAT("wake", wake),
 };

 static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
@@ -1014,6 +1018,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	 */
 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 		netif_stop_subqueue(dev, qnum);
+		u64_stats_update_begin(&sq->stats.syncp);
+		u64_stats_inc(&sq->stats.stop);
+		u64_stats_update_end(&sq->stats.syncp);
 		if (use_napi) {
 			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
 				virtqueue_napi_schedule(&sq->napi, sq->vq);
@@ -1022,6 +1029,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 			free_old_xmit(sq, false);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 				netif_start_subqueue(dev, qnum);
+				u64_stats_update_begin(&sq->stats.syncp);
+				u64_stats_inc(&sq->stats.wake);
+				u64_stats_update_end(&sq->stats.syncp);
 				virtqueue_disable_cb(sq->vq);
 			}
 		}
@@ -2322,8 +2332,14 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 			free_old_xmit(sq, true);
 		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

-		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+			if (netif_tx_queue_stopped(txq)) {
+				u64_stats_update_begin(&sq->stats.syncp);
+				u64_stats_inc(&sq->stats.wake);
+				u64_stats_update_end(&sq->stats.syncp);
+			}
 			netif_tx_wake_queue(txq);
+		}

 		__netif_tx_unlock(txq);
 	}
@@ -2473,8 +2489,14 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	virtqueue_disable_cb(sq->vq);
 	free_old_xmit(sq, true);

-	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+		if (netif_tx_queue_stopped(txq)) {
+			u64_stats_update_begin(&sq->stats.syncp);
+			u64_stats_inc(&sq->stats.wake);
+			u64_stats_update_end(&sq->stats.syncp);
+		}
 		netif_tx_wake_queue(txq);
+	}

 	opaque = virtqueue_enable_cb_prepare(sq->vq);
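The increments above are wrapped in u64_stats_update_begin()/u64_stats_update_end() so the 64-bit counters can be read without tearing on 32-bit SMP. As a hedged illustration of the matching reader side (not a verbatim copy of virtio_net's stats collection code, and foo_read_stop_wake is a hypothetical helper), a snapshot would be taken under the same syncp sequence counter:

/* Illustrative reader for the stop/wake counters updated above: retry
 * the snapshot if an updater raced with us. "sq" is the driver's
 * per-queue stats container guarded by stats.syncp.
 */
#include <linux/u64_stats_sync.h>

static void foo_read_stop_wake(struct send_queue *sq, u64 *stop, u64 *wake)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&sq->stats.syncp);
		*stop = u64_stats_read(&sq->stats.stop);
		*wake = u64_stats_read(&sq->stats.wake);
	} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
}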
@@ -4789,6 +4811,8 @@ static void virtnet_get_base_stats(struct net_device *dev,

 	tx->bytes = 0;
 	tx->packets = 0;
+	tx->stop = 0;
+	tx->wake = 0;

 	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
 		tx->hw_drops = 0;
......
@@ -41,6 +41,9 @@ struct netdev_queue_stats_tx {
 	u64 hw_gso_wire_bytes;
 	u64 hw_drop_ratelimits;
+
+	u64 stop;
+	u64 wake;
 };
/**
......
@@ -165,6 +165,8 @@ enum {
 	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
 	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
 	NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
+	NETDEV_A_QSTATS_TX_STOP,
+	NETDEV_A_QSTATS_TX_WAKE,

 	__NETDEV_A_QSTATS_MAX,
 	NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
......
@@ -517,7 +517,9 @@ netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
 	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
 	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
 	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
-	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits))
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
 		return -EMSGSIZE;
 	return 0;
 }
......
@@ -165,6 +165,8 @@ enum {
 	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
 	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
 	NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
+	NETDEV_A_QSTATS_TX_STOP,
+	NETDEV_A_QSTATS_TX_WAKE,

 	__NETDEV_A_QSTATS_MAX,
 	NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
......