Commit 433e274b authored by Kuo Zhao, committed by David S. Miller

gve: Add stats for gve.

Sample output of "ethtool -S <interface-name>" with 1 RX queue and 1 TX
queue:
NIC statistics:
     rx_packets: 1039
     tx_packets: 37
     rx_bytes: 822071
     tx_bytes: 4100
     rx_dropped: 0
     tx_dropped: 0
     tx_timeouts: 0
     rx_skb_alloc_fail: 0
     rx_buf_alloc_fail: 0
     rx_desc_err_dropped_pkt: 0
     interface_up_cnt: 1
     interface_down_cnt: 0
     reset_cnt: 0
     page_alloc_fail: 0
     dma_mapping_error: 0
     rx_posted_desc[0]: 1365
     rx_completed_desc[0]: 341
     rx_bytes[0]: 215094
     rx_dropped_pkt[0]: 0
     rx_copybreak_pkt[0]: 3
     rx_copied_pkt[0]: 3
     tx_posted_desc[0]: 6
     tx_completed_desc[0]: 6
     tx_bytes[0]: 420
     tx_wake[0]: 0
     tx_stop[0]: 0
     tx_event_counter[0]: 6
     adminq_prod_cnt: 34
     adminq_cmd_fail: 0
     adminq_timeouts: 0
     adminq_describe_device_cnt: 1
     adminq_cfg_device_resources_cnt: 1
     adminq_register_page_list_cnt: 16
     adminq_unregister_page_list_cnt: 0
     adminq_create_tx_queue_cnt: 8
     adminq_create_rx_queue_cnt: 8
     adminq_destroy_tx_queue_cnt: 0
     adminq_destroy_rx_queue_cnt: 0
     adminq_dcfg_device_resources_cnt: 0
     adminq_set_driver_parameter_cnt: 0
Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Kuo Zhao <kuozhao@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d5f7543c
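Background for the diff below: "ethtool -S" obtains the stat count, the name table, and the value array through three SIOCETHTOOL ioctls, served by the gve_get_sset_count(), gve_get_strings(), and gve_get_ethtool_stats() callbacks this patch extends. The following is a minimal standalone reader sketched for orientation only; it is not part of the patch, error handling is omitted, and the interface name "eth0" is just an example.

/* Minimal sketch of what "ethtool -S" does under the hood: fetch the
 * number of stats, the string table, and the u64 values, then print
 * them pairwise. Illustration only; error handling is trimmed.
 */
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct {
		struct ethtool_sset_info hdr;
		__u32 buf[1];
	} sset = { .hdr = { .cmd = ETHTOOL_GSSET_INFO,
			    .sset_mask = 1ULL << ETH_SS_STATS } };
	struct ethtool_gstrings *names;
	struct ethtool_stats *vals;
	struct ifreq ifr = {0};
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* example name */

	ifr.ifr_data = (void *)&sset;
	ioctl(fd, SIOCETHTOOL, &ifr);
	n = sset.hdr.data[0]; /* the count gve_get_sset_count() returned */

	names = calloc(1, sizeof(*names) + n * ETH_GSTRING_LEN);
	names->cmd = ETHTOOL_GSTRINGS;
	names->string_set = ETH_SS_STATS;
	names->len = n;
	ifr.ifr_data = (void *)names;
	ioctl(fd, SIOCETHTOOL, &ifr); /* filled by gve_get_strings() */

	vals = calloc(1, sizeof(*vals) + n * sizeof(__u64));
	vals->cmd = ETHTOOL_GSTATS;
	vals->n_stats = n;
	ifr.ifr_data = (void *)vals;
	ioctl(fd, SIOCETHTOOL, &ifr); /* filled by gve_get_ethtool_stats() */

	for (i = 0; i < n; i++)
		printf("     %.*s: %llu\n", ETH_GSTRING_LEN,
		       (char *)(names->data + i * ETH_GSTRING_LEN),
		       (unsigned long long)vals->data[i]);
	return 0;
}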
drivers/net/ethernet/google/gve/gve.h
@@ -71,6 +71,11 @@ struct gve_rx_ring {
 	u32 cnt; /* free-running total number of completed packets */
 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
+	u64 rx_copied_pkt; /* free-running total number of copied packets */
+	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
+	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
+	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
 	u32 q_num; /* queue index */
 	u32 ntfy_id; /* notification block index */
 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -202,6 +207,26 @@ struct gve_priv {
 	dma_addr_t adminq_bus_addr;
 	u32 adminq_mask; /* masks prod_cnt to adminq size */
 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
+	u32 adminq_cmd_fail; /* free-running count of AQ cmd failures */
+	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
+	/* free-running count of each AQ cmd executed */
+	u32 adminq_describe_device_cnt;
+	u32 adminq_cfg_device_resources_cnt;
+	u32 adminq_register_page_list_cnt;
+	u32 adminq_unregister_page_list_cnt;
+	u32 adminq_create_tx_queue_cnt;
+	u32 adminq_create_rx_queue_cnt;
+	u32 adminq_destroy_tx_queue_cnt;
+	u32 adminq_destroy_rx_queue_cnt;
+	u32 adminq_dcfg_device_resources_cnt;
+	u32 adminq_set_driver_parameter_cnt;
+
+	/* Global stats */
+	u32 interface_up_cnt; /* count of times interface turned up since last reset */
+	u32 interface_down_cnt; /* count of times interface turned down since last reset */
+	u32 reset_cnt; /* count of driver resets */
+	u32 page_alloc_fail; /* count of page alloc fails */
+	u32 dma_mapping_error; /* count of dma mapping errors */
 
 	struct workqueue_struct *gve_wq;
 	struct work_struct service_task;
@@ -426,7 +451,8 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
 }
 
 /* buffers */
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+		   struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction);
 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 		   enum dma_data_direction);
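The new ring counters follow the convention of the existing cnt/fill_cnt fields: they run freely for the life of the ring and are never reset, and only the datapath folds them into a slot index with mask. A toy userspace illustration (not driver code) of how the sample output above can report rx_posted_desc[0]: 1365 on a ring far smaller than 1365 entries:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024 /* must be a power of two; size assumed for the example */

/* Miniature of the gve ring bookkeeping: cnt and fill_cnt run freely,
 * and "& mask" folds them into a slot index. The stats code exports
 * the raw free-running values as-is.
 */
struct mini_ring {
	uint32_t cnt;      /* completed descriptors, free-running */
	uint32_t fill_cnt; /* posted descriptors, free-running */
	uint32_t mask;     /* RING_SIZE - 1 */
};

int main(void)
{
	struct mini_ring r = { .cnt = 0, .fill_cnt = 0, .mask = RING_SIZE - 1 };

	/* post 1365 buffers over time, complete 341 of them */
	r.fill_cnt += 1365;
	r.cnt += 341;

	/* ethtool-style stats report the raw counters... */
	printf("rx_posted_desc[0]: %u\n", r.fill_cnt);
	printf("rx_completed_desc[0]: %u\n", r.cnt);

	/* ...while the datapath uses the masked value as a ring index */
	printf("next completion slot: %u\n", r.cnt & r.mask);
	return 0;
}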
drivers/net/ethernet/google/gve/gve_adminq.c
@@ -23,6 +23,18 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
 	priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
 	priv->adminq_prod_cnt = 0;
+	priv->adminq_cmd_fail = 0;
+	priv->adminq_timeouts = 0;
+	priv->adminq_describe_device_cnt = 0;
+	priv->adminq_cfg_device_resources_cnt = 0;
+	priv->adminq_register_page_list_cnt = 0;
+	priv->adminq_unregister_page_list_cnt = 0;
+	priv->adminq_create_tx_queue_cnt = 0;
+	priv->adminq_create_rx_queue_cnt = 0;
+	priv->adminq_destroy_tx_queue_cnt = 0;
+	priv->adminq_destroy_rx_queue_cnt = 0;
+	priv->adminq_dcfg_device_resources_cnt = 0;
+	priv->adminq_set_driver_parameter_cnt = 0;
 
 	/* Setup Admin queue with the device */
 	iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -81,17 +93,18 @@ static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
 	return false;
 }
 
-static int gve_adminq_parse_err(struct device *dev, u32 status)
+static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
 {
 	if (status != GVE_ADMINQ_COMMAND_PASSED &&
-	    status != GVE_ADMINQ_COMMAND_UNSET)
-		dev_err(dev, "AQ command failed with status %d\n", status);
+	    status != GVE_ADMINQ_COMMAND_UNSET) {
+		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
+		priv->adminq_cmd_fail++;
+	}
 	switch (status) {
 	case GVE_ADMINQ_COMMAND_PASSED:
 		return 0;
 	case GVE_ADMINQ_COMMAND_UNSET:
-		dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
+		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
 		return -EINVAL;
 	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
 	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
@@ -116,7 +129,7 @@ static int gve_adminq_parse_err(struct device *dev, u32 status)
 	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
 		return -ENOTSUPP;
 	default:
-		dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
+		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
 		return -EINVAL;
 	}
 }
@@ -130,22 +143,60 @@ int gve_adminq_execute_cmd(struct gve_priv *priv,
 	union gve_adminq_command *cmd;
 	u32 status = 0;
 	u32 prod_cnt;
+	u32 opcode;
 
 	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
 	priv->adminq_prod_cnt++;
 	prod_cnt = priv->adminq_prod_cnt;
 
 	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
+	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+
+	switch (opcode) {
+	case GVE_ADMINQ_DESCRIBE_DEVICE:
+		priv->adminq_describe_device_cnt++;
+		break;
+	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
+		priv->adminq_cfg_device_resources_cnt++;
+		break;
+	case GVE_ADMINQ_REGISTER_PAGE_LIST:
+		priv->adminq_register_page_list_cnt++;
+		break;
+	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
+		priv->adminq_unregister_page_list_cnt++;
+		break;
+	case GVE_ADMINQ_CREATE_TX_QUEUE:
+		priv->adminq_create_tx_queue_cnt++;
+		break;
+	case GVE_ADMINQ_CREATE_RX_QUEUE:
+		priv->adminq_create_rx_queue_cnt++;
+		break;
+	case GVE_ADMINQ_DESTROY_TX_QUEUE:
+		priv->adminq_destroy_tx_queue_cnt++;
+		break;
+	case GVE_ADMINQ_DESTROY_RX_QUEUE:
+		priv->adminq_destroy_rx_queue_cnt++;
+		break;
+	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
+		priv->adminq_dcfg_device_resources_cnt++;
+		break;
+	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
+		priv->adminq_set_driver_parameter_cnt++;
+		break;
+	default:
+		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
+	}
 
 	gve_adminq_kick_cmd(priv, prod_cnt);
 	if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
 		dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
+		priv->adminq_timeouts++;
 		return -ENOTRECOVERABLE;
 	}
 
 	memcpy(cmd_orig, cmd, sizeof(*cmd));
 	status = be32_to_cpu(READ_ONCE(cmd->status));
-	return gve_adminq_parse_err(&priv->pdev->dev, status);
+	return gve_adminq_parse_err(priv, status);
 }
 
 /* The device specifies that the management vector can either be the first irq
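For orientation, this is roughly how a command reaches the accounting switch added above: callers fill a union gve_adminq_command with a big-endian opcode and hand it to gve_adminq_execute_cmd(), which is why the switch reads the opcode back with be32_to_cpu(READ_ONCE(...)) from the DMA-visible slot. A sketch modeled on the driver's describe_device path, with the payload elided; treat the exact field layout as an assumption from gve_adminq.h:

/* Sketch only, not part of this patch. With this patch applied, one
 * call here bumps both adminq_prod_cnt and adminq_describe_device_cnt.
 */
static int example_issue_describe_device(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	/* opcode is stored big-endian in the DMA-visible command slot */
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	/* ... fill cmd.describe_device payload here ... */

	return gve_adminq_execute_cmd(priv, &cmd);
}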
drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -34,17 +34,40 @@ static u32 gve_get_msglevel(struct net_device *netdev)
 static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
 	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
 	"rx_dropped", "tx_dropped", "tx_timeouts",
+	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
+	"page_alloc_fail", "dma_mapping_error",
+};
+
+static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
+	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
+};
+
+static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
+	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
+};
+
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
+	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
+	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
+	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
+	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
+	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
 };
 
 #define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
-#define NUM_GVE_TX_CNTS	5
-#define NUM_GVE_RX_CNTS	2
+#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
+#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
+#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
 
 static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
 	char *s = (char *)data;
-	int i;
+	int i, j;
 
 	if (stringset != ETH_SS_STATS)
 		return;
@@ -53,23 +76,21 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 	       sizeof(gve_gstrings_main_stats));
 	s += sizeof(gve_gstrings_main_stats);
 	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-		snprintf(s, ETH_GSTRING_LEN, "rx_desc_cnt[%u]", i);
-		s += ETH_GSTRING_LEN;
-		snprintf(s, ETH_GSTRING_LEN, "rx_desc_fill_cnt[%u]", i);
-		s += ETH_GSTRING_LEN;
+		for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
+			snprintf(s, ETH_GSTRING_LEN, gve_gstrings_rx_stats[j], i);
+			s += ETH_GSTRING_LEN;
+		}
 	}
 	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
-		snprintf(s, ETH_GSTRING_LEN, "tx_req[%u]", i);
-		s += ETH_GSTRING_LEN;
-		snprintf(s, ETH_GSTRING_LEN, "tx_done[%u]", i);
-		s += ETH_GSTRING_LEN;
-		snprintf(s, ETH_GSTRING_LEN, "tx_wake[%u]", i);
-		s += ETH_GSTRING_LEN;
-		snprintf(s, ETH_GSTRING_LEN, "tx_stop[%u]", i);
-		s += ETH_GSTRING_LEN;
-		snprintf(s, ETH_GSTRING_LEN, "tx_event_counter[%u]", i);
-		s += ETH_GSTRING_LEN;
+		for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
+			snprintf(s, ETH_GSTRING_LEN, gve_gstrings_tx_stats[j], i);
+			s += ETH_GSTRING_LEN;
+		}
 	}
+
+	memcpy(s, *gve_gstrings_adminq_stats,
+	       sizeof(gve_gstrings_adminq_stats));
+	s += sizeof(gve_gstrings_adminq_stats);
 }
 
 static int gve_get_sset_count(struct net_device *netdev, int sset)
@@ -78,7 +99,7 @@ static int gve_get_sset_count(struct net_device *netdev, int sset)
 	switch (sset) {
 	case ETH_SS_STATS:
-		return GVE_MAIN_STATS_LEN +
+		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
 		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
 		       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
 	default:
@@ -90,24 +111,40 @@ static void
 gve_get_ethtool_stats(struct net_device *netdev,
 		      struct ethtool_stats *stats, u64 *data)
 {
-	struct gve_priv *priv = netdev_priv(netdev);
-	u64 rx_pkts, rx_bytes, tx_pkts, tx_bytes;
+	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+		tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
+		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+	struct gve_priv *priv;
 	unsigned int start;
 	int ring;
 	int i;
 
 	ASSERT_RTNL();
 
-	for (rx_pkts = 0, rx_bytes = 0, ring = 0;
+	priv = netdev_priv(netdev);
+	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
+	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
 	     ring < priv->rx_cfg.num_queues; ring++) {
 		if (priv->rx) {
 			do {
+				struct gve_rx_ring *rx = &priv->rx[ring];
+
 				start =
 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
-				rx_pkts += priv->rx[ring].rpackets;
-				rx_bytes += priv->rx[ring].rbytes;
+				tmp_rx_pkts = rx->rpackets;
+				tmp_rx_bytes = rx->rbytes;
+				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+				tmp_rx_desc_err_dropped_pkt =
+					rx->rx_desc_err_dropped_pkt;
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
 						       start));
+			rx_pkts += tmp_rx_pkts;
+			rx_bytes += tmp_rx_bytes;
+			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
+			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
+			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
 		}
 	}
 	for (tx_pkts = 0, tx_bytes = 0, ring = 0;
@@ -116,10 +153,12 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			do {
 				start =
 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
-				tx_pkts += priv->tx[ring].pkt_done;
-				tx_bytes += priv->tx[ring].bytes_done;
+				tmp_tx_pkts = priv->tx[ring].pkt_done;
+				tmp_tx_bytes = priv->tx[ring].bytes_done;
 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
 						       start));
+			tx_pkts += tmp_tx_pkts;
+			tx_bytes += tmp_tx_bytes;
 		}
 	}
 
@@ -128,9 +167,21 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	data[i++] = tx_pkts;
 	data[i++] = rx_bytes;
 	data[i++] = tx_bytes;
-	/* Skip rx_dropped and tx_dropped */
-	i += 2;
+	/* total rx dropped packets */
+	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
+		    rx_desc_err_dropped_pkt;
+	/* Skip tx_dropped */
+	i++;
 	data[i++] = priv->tx_timeo_cnt;
+	data[i++] = rx_skb_alloc_fail;
+	data[i++] = rx_buf_alloc_fail;
+	data[i++] = rx_desc_err_dropped_pkt;
+	data[i++] = priv->interface_up_cnt;
+	data[i++] = priv->interface_down_cnt;
+	data[i++] = priv->reset_cnt;
+	data[i++] = priv->page_alloc_fail;
+	data[i++] = priv->dma_mapping_error;
 	i = GVE_MAIN_STATS_LEN;
 
 	/* walk RX rings */
@@ -138,8 +189,25 @@ gve_get_ethtool_stats(struct net_device *netdev,
 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
 			struct gve_rx_ring *rx = &priv->rx[ring];
 
-			data[i++] = rx->cnt;
 			data[i++] = rx->fill_cnt;
+			data[i++] = rx->cnt;
+			do {
+				start =
+				  u64_stats_fetch_begin(&priv->rx[ring].statss);
+				tmp_rx_bytes = rx->rbytes;
+				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+				tmp_rx_desc_err_dropped_pkt =
+					rx->rx_desc_err_dropped_pkt;
+			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+						       start));
+			data[i++] = tmp_rx_bytes;
+			/* rx dropped packets */
+			data[i++] = tmp_rx_skb_alloc_fail +
+				tmp_rx_buf_alloc_fail +
+				tmp_rx_desc_err_dropped_pkt;
+			data[i++] = rx->rx_copybreak_pkt;
+			data[i++] = rx->rx_copied_pkt;
 		}
 	} else {
 		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
@@ -151,6 +219,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			data[i++] = tx->req;
 			data[i++] = tx->done;
+			do {
+				start =
+				  u64_stats_fetch_begin(&priv->tx[ring].statss);
+				tmp_tx_bytes = tx->bytes_done;
+			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+						       start));
+			data[i++] = tmp_tx_bytes;
 			data[i++] = tx->wake_queue;
 			data[i++] = tx->stop_queue;
 			data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
@@ -159,6 +234,20 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	} else {
 		i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
 	}
+
+	/* AQ Stats */
+	data[i++] = priv->adminq_prod_cnt;
+	data[i++] = priv->adminq_cmd_fail;
+	data[i++] = priv->adminq_timeouts;
+	data[i++] = priv->adminq_describe_device_cnt;
+	data[i++] = priv->adminq_cfg_device_resources_cnt;
+	data[i++] = priv->adminq_register_page_list_cnt;
+	data[i++] = priv->adminq_unregister_page_list_cnt;
+	data[i++] = priv->adminq_create_tx_queue_cnt;
+	data[i++] = priv->adminq_create_rx_queue_cnt;
+	data[i++] = priv->adminq_destroy_tx_queue_cnt;
+	data[i++] = priv->adminq_destroy_rx_queue_cnt;
+	data[i++] = priv->adminq_dcfg_device_resources_cnt;
+	data[i++] = priv->adminq_set_driver_parameter_cnt;
 }
 
 static void gve_get_channels(struct net_device *netdev,
@@ -245,7 +334,8 @@ static int gve_get_tunable(struct net_device *netdev,
 }
 
 static int gve_set_tunable(struct net_device *netdev,
-			   const struct ethtool_tunable *etuna, const void *value)
+			   const struct ethtool_tunable *etuna,
+			   const void *value)
 {
 	struct gve_priv *priv = netdev_priv(netdev);
 	u32 len;
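The three ethtool callbacks form a contract: gve_get_sset_count() sizes the arrays, gve_get_strings() emits the names, and gve_get_ethtool_stats() must write values in exactly the same order (hence the explicit skip of tx_dropped and the rewind to i = GVE_MAIN_STATS_LEN). The per-queue names come from expanding the "[%u]" templates once per queue. A standalone illustration of that expansion, assuming two queues for the example:

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32 /* same value as the kernel's */
#define NUM_QUEUES 2       /* assumed queue count for the example */

static const char rx_templates[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
};

#define NUM_RX_CNTS (sizeof(rx_templates) / ETH_GSTRING_LEN)

int main(void)
{
	char buf[NUM_QUEUES * NUM_RX_CNTS * ETH_GSTRING_LEN];
	char *s = buf;
	unsigned int i, j;

	/* Same expansion gve_get_strings() performs: each template row is
	 * instantiated once per queue, giving rx_posted_desc[0],
	 * rx_completed_desc[0], ..., then rx_posted_desc[1], ...
	 */
	for (i = 0; i < NUM_QUEUES; i++)
		for (j = 0; j < NUM_RX_CNTS; j++) {
			snprintf(s, ETH_GSTRING_LEN, rx_templates[j], i);
			s += ETH_GSTRING_LEN;
		}

	for (s = buf; s < buf + sizeof(buf); s += ETH_GSTRING_LEN)
		printf("%s\n", s);
	return 0;
}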
drivers/net/ethernet/google/gve/gve_main.c
@@ -514,14 +514,18 @@ static void gve_free_rings(struct gve_priv *priv)
 	}
 }
 
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+		   struct page **page, dma_addr_t *dma,
 		   enum dma_data_direction dir)
 {
 	*page = alloc_page(GFP_KERNEL);
-	if (!*page)
+	if (!*page) {
+		priv->page_alloc_fail++;
 		return -ENOMEM;
+	}
 	*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
 	if (dma_mapping_error(dev, *dma)) {
+		priv->dma_mapping_error++;
 		put_page(*page);
 		return -ENOMEM;
 	}
@@ -556,7 +560,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
 		return -ENOMEM;
 
 	for (i = 0; i < pages; i++) {
-		err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i],
+		err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
 				     &qpl->page_buses[i],
 				     gve_qpl_dma_dir(priv, id));
 		/* caller handles clean up */
@@ -697,6 +701,7 @@ static int gve_open(struct net_device *dev)
 
 	gve_turnup(priv);
 	netif_carrier_on(dev);
+	priv->interface_up_cnt++;
 	return 0;
 
 free_rings:
@@ -738,6 +743,7 @@ static int gve_close(struct net_device *dev)
 	gve_free_rings(priv);
 	gve_free_qpls(priv);
+	priv->interface_down_cnt++;
 	return 0;
 
 err:
@@ -1047,6 +1053,9 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown)
 	/* Set it all back up */
 	err = gve_reset_recovery(priv, was_up);
 	gve_clear_reset_in_progress(priv);
+	priv->reset_cnt++;
+	priv->interface_up_cnt = 0;
+	priv->interface_down_cnt = 0;
 	return err;
 }
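One subtlety in the gve_main.c hunks above: gve_reset() zeroes interface_up_cnt and interface_down_cnt after bumping reset_cnt, so the up/down counters read as "since last reset" while reset_cnt is a lifetime count. A toy model of that lifecycle (plain C, not driver code):

#include <stdio.h>

struct counters {
	unsigned int interface_up_cnt;   /* since last reset */
	unsigned int interface_down_cnt; /* since last reset */
	unsigned int reset_cnt;          /* lifetime */
};

static void open_if(struct counters *c)  { c->interface_up_cnt++; }
static void close_if(struct counters *c) { c->interface_down_cnt++; }

static void reset_if(struct counters *c)
{
	c->reset_cnt++;
	/* up/down are defined relative to the last reset, so they restart */
	c->interface_up_cnt = 0;
	c->interface_down_cnt = 0;
}

int main(void)
{
	struct counters c = {0};

	open_if(&c); close_if(&c); open_if(&c);
	reset_if(&c);
	open_if(&c);
	printf("up=%u down=%u resets=%u\n",
	       c.interface_up_cnt, c.interface_down_cnt, c.reset_cnt);
	/* prints: up=1 down=0 resets=1 */
	return 0;
}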
drivers/net/ethernet/google/gve/gve_rx.c
@@ -225,7 +225,8 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
 	return PKT_HASH_TYPE_L2;
 }
 
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
+static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
+				   struct net_device *dev,
 				   struct napi_struct *napi,
 				   struct gve_rx_slot_page_info *page_info,
 				   u16 len)
@@ -242,6 +243,11 @@ static struct sk_buff *gve_rx_copy(struct net_device *dev,
 	skb_copy_to_linear_data(skb, va, len);
 
 	skb->protocol = eth_type_trans(skb, dev);
+
+	u64_stats_update_begin(&rx->statss);
+	rx->rx_copied_pkt++;
+	u64_stats_update_end(&rx->statss);
+
 	return skb;
 }
 
@@ -284,8 +290,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 	u16 len;
 
 	/* drop this packet */
-	if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
+	if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
+		u64_stats_update_begin(&rx->statss);
+		rx->rx_desc_err_dropped_pkt++;
+		u64_stats_update_end(&rx->statss);
 		return true;
+	}
 
 	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
 	page_info = &rx->data.page_info[idx];
@@ -300,11 +310,14 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 	if (PAGE_SIZE == 4096) {
 		if (len <= priv->rx_copybreak) {
 			/* Just copy small packets */
-			skb = gve_rx_copy(dev, napi, page_info, len);
+			skb = gve_rx_copy(rx, dev, napi, page_info, len);
+			u64_stats_update_begin(&rx->statss);
+			rx->rx_copybreak_pkt++;
+			u64_stats_update_end(&rx->statss);
 			goto have_skb;
 		}
 		if (unlikely(!gve_can_recycle_pages(dev))) {
-			skb = gve_rx_copy(dev, napi, page_info, len);
+			skb = gve_rx_copy(rx, dev, napi, page_info, len);
 			goto have_skb;
 		}
 		pagecount = page_count(page_info->page);
@@ -314,8 +327,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 			 * stack.
 			 */
 			skb = gve_rx_add_frags(dev, napi, page_info, len);
-			if (!skb)
+			if (!skb) {
+				u64_stats_update_begin(&rx->statss);
+				rx->rx_skb_alloc_fail++;
+				u64_stats_update_end(&rx->statss);
 				return true;
+			}
 			/* Make sure the kernel stack can't release the page */
 			get_page(page_info->page);
 			/* "flip" to other packet buffer on this page */
@@ -324,21 +341,25 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 			/* We have previously passed the other half of this
 			 * page up the stack, but it has not yet been freed.
 			 */
-			skb = gve_rx_copy(dev, napi, page_info, len);
+			skb = gve_rx_copy(rx, dev, napi, page_info, len);
 		} else {
 			WARN(pagecount < 1, "Pagecount should never be < 1");
 			return false;
 		}
 	} else {
-		skb = gve_rx_copy(dev, napi, page_info, len);
+		skb = gve_rx_copy(rx, dev, napi, page_info, len);
 	}
 
 have_skb:
 	/* We didn't manage to allocate an skb but we haven't had any
 	 * reset worthy failures.
 	 */
-	if (!skb)
+	if (!skb) {
+		u64_stats_update_begin(&rx->statss);
+		rx->rx_skb_alloc_fail++;
+		u64_stats_update_end(&rx->statss);
 		return true;
+	}
 
 	if (likely(feat & NETIF_F_RXCSUM)) {
 		/* NIC passes up the partial sum */
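Every rx counter bump above sits between u64_stats_update_begin() and u64_stats_update_end(), pairing with the fetch/retry loops in gve_get_ethtool_stats() so a 32-bit reader never sees a torn u64. A condensed sketch of the pattern using the same kernel API (shown out of tree; single writer per ring assumed, as in the driver):

/* Sketch of the u64_stats_sync pattern this patch relies on. On 64-bit
 * kernels these helpers compile to (almost) nothing; on 32-bit they
 * make the reader retry until it sees a consistent snapshot.
 */
#include <linux/u64_stats_sync.h>

struct demo_ring {
	u64 pkts;
	struct u64_stats_sync statss;
};

/* writer: datapath (NAPI) context, single writer per ring */
static void demo_count_pkt(struct demo_ring *r)
{
	u64_stats_update_begin(&r->statss);
	r->pkts++;
	u64_stats_update_end(&r->statss);
}

/* reader: ethtool context, may run concurrently with the writer */
static u64 demo_read_pkts(struct demo_ring *r)
{
	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin(&r->statss);
		pkts = r->pkts;
	} while (u64_stats_fetch_retry(&r->statss, start));

	return pkts;
}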