Commit 82fd151d authored by Shailend Chand's avatar Shailend Chand Committed by David S. Miller

gve: Reduce alloc and copy costs in the GQ rx path

Previously, even if just one of the many fragments of a 9k packet
required a copy, we'd copy the whole packet into a freshly-allocated
9k-sized linear SKB, and this led to performance issues.

By having a pool of pages to copy into, each fragment can be
independently handled, leading to a reduced incidence of
allocation and copy.
Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d08b0f8f
...@@ -60,7 +60,8 @@ struct gve_rx_slot_page_info { ...@@ -60,7 +60,8 @@ struct gve_rx_slot_page_info {
void *page_address; void *page_address;
u32 page_offset; /* offset to write to in page */ u32 page_offset; /* offset to write to in page */
int pagecnt_bias; /* expected pagecnt if only the driver has a ref */ int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
u8 can_flip; u16 pad; /* adjustment for rx padding */
u8 can_flip; /* tracks if the networking stack is using the page */
}; };
/* A list of pages registered with the device during setup and used by a queue /* A list of pages registered with the device during setup and used by a queue
...@@ -149,10 +150,17 @@ struct gve_rx_ctx { ...@@ -149,10 +150,17 @@ struct gve_rx_ctx {
/* head and tail of skb chain for the current packet or NULL if none */ /* head and tail of skb chain for the current packet or NULL if none */
struct sk_buff *skb_head; struct sk_buff *skb_head;
struct sk_buff *skb_tail; struct sk_buff *skb_tail;
u16 total_expected_size; u32 total_size;
u8 expected_frag_cnt; u8 frag_cnt;
u8 curr_frag_cnt; bool drop_pkt;
u8 reuse_frags; };
struct gve_rx_cnts {
u32 ok_pkt_bytes;
u16 ok_pkt_cnt;
u16 total_pkt_cnt;
u16 cont_pkt_cnt;
u16 desc_err_pkt_cnt;
}; };
/* Contains datapath state used to represent an RX queue. */ /* Contains datapath state used to represent an RX queue. */
...@@ -167,6 +175,10 @@ struct gve_rx_ring { ...@@ -167,6 +175,10 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */ /* threshold for posting new buffs and descs */
u32 db_threshold; u32 db_threshold;
u16 packet_buffer_size; u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
struct gve_rx_slot_page_info *qpl_copy_pool;
}; };
/* DQO fields. */ /* DQO fields. */
...@@ -216,7 +228,9 @@ struct gve_rx_ring { ...@@ -216,7 +228,9 @@ struct gve_rx_ring {
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */ u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */ u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
u32 q_num; /* queue index */ u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */ u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */ struct gve_queue_resources *q_resources; /* head and tail pointer idx */
......
...@@ -45,6 +45,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { ...@@ -45,6 +45,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]", "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
"rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]", "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
...@@ -271,6 +272,7 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -271,6 +272,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_cont_packet_cnt; data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt; data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt; data[i++] = rx->rx_frag_copy_cnt;
data[i++] = rx->rx_frag_alloc_cnt;
/* rx dropped packets */ /* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail + data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail + tmp_rx_buf_alloc_fail +
......
This diff is collapsed.
...@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, ...@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (eop && buf_len <= priv->rx_copybreak) { if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0, NULL); &buf_state->page_info, buf_len, 0);
if (unlikely(!rx->ctx.skb_head)) if (unlikely(!rx->ctx.skb_head))
goto error; goto error;
rx->ctx.skb_tail = rx->ctx.skb_head; rx->ctx.skb_tail = rx->ctx.skb_head;
......
...@@ -50,34 +50,18 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) ...@@ -50,34 +50,18 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len, struct gve_rx_slot_page_info *page_info, u16 len,
u16 padding, struct gve_rx_ctx *ctx) u16 padding)
{ {
void *va = page_info->page_address + padding + page_info->page_offset; void *va = page_info->page_address + padding + page_info->page_offset;
int skb_linear_offset = 0;
bool set_protocol = false;
struct sk_buff *skb; struct sk_buff *skb;
if (ctx) { skb = napi_alloc_skb(napi, len);
if (!ctx->skb_head) if (unlikely(!skb))
ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size); return NULL;
if (unlikely(!ctx->skb_head))
return NULL;
skb = ctx->skb_head;
skb_linear_offset = skb->len;
set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
} else {
skb = napi_alloc_skb(napi, len);
if (unlikely(!skb))
return NULL;
set_protocol = true;
}
__skb_put(skb, len);
skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
if (set_protocol) __skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev); skb_copy_to_linear_data_offset(skb, 0, va, len);
skb->protocol = eth_type_trans(skb, dev);
return skb; return skb;
} }
......
...@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); ...@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len, struct gve_rx_slot_page_info *page_info, u16 len,
u16 pad, struct gve_rx_ctx *ctx); u16 pad);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */ /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info); void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment