Commit 37149e93 authored by David Awogbemila's avatar David Awogbemila Committed by David S. Miller

gve: Implement packet continuation for RX.

This enables the driver to receive RX packets spread across multiple
buffers:

For a given multi-fragment packet the "packet continuation" bit is set
on all descriptors except the last one. These descriptors' payloads are
combined into a single SKB before the SKB is handed to the
networking stack.

This change adds a "packet buffer size" notion for RX queues. The
CreateRxQueue AdminQueue command sent to the device now includes the
packet_buffer_size.

We opt for a packet_buffer_size of PAGE_SIZE / 2 to give the
driver the opportunity to flip pages where we can instead of copying.
Signed-off-by: David Awogbemila <awogbemila@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1344e751
...@@ -149,6 +149,10 @@ struct gve_rx_ctx { ...@@ -149,6 +149,10 @@ struct gve_rx_ctx {
/* head and tail of skb chain for the current packet or NULL if none */ /* head and tail of skb chain for the current packet or NULL if none */
struct sk_buff *skb_head; struct sk_buff *skb_head;
struct sk_buff *skb_tail; struct sk_buff *skb_tail;
u16 total_expected_size;
u8 expected_frag_cnt;
u8 curr_frag_cnt;
u8 reuse_frags;
}; };
/* Contains datapath state used to represent an RX queue. */ /* Contains datapath state used to represent an RX queue. */
...@@ -162,6 +166,7 @@ struct gve_rx_ring { ...@@ -162,6 +166,7 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */ /* threshold for posting new buffs and descs */
u32 db_threshold; u32 db_threshold;
u16 packet_buffer_size;
}; };
/* DQO fields. */ /* DQO fields. */
...@@ -209,6 +214,9 @@ struct gve_rx_ring { ...@@ -209,6 +214,9 @@ struct gve_rx_ring {
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */ u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
u32 q_num; /* queue index */ u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */ u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */ struct gve_queue_resources *q_resources; /* head and tail pointer idx */
......
...@@ -530,6 +530,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) ...@@ -530,6 +530,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(rx->data.data_bus), cpu_to_be64(rx->data.data_bus),
cmd.create_rx_queue.index = cpu_to_be32(queue_index); cmd.create_rx_queue.index = cpu_to_be32(queue_index);
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else { } else {
cmd.create_rx_queue.rx_ring_size = cmd.create_rx_queue.rx_ring_size =
cpu_to_be16(priv->rx_desc_cnt); cpu_to_be16(priv->rx_desc_cnt);
......
...@@ -90,12 +90,13 @@ union gve_rx_data_slot { ...@@ -90,12 +90,13 @@ union gve_rx_data_slot {
/* GVE Receive Packet Descriptor Flags */ /* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x))) #define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */ #define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */ #define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */ #define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */ #define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */
#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */ #define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */
#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */ #define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */
#define GVE_RXF_PKT_CONT GVE_RXFLG(10) /* Multi Fragment RX packet */
/* GVE IRQ */ /* GVE IRQ */
#define GVE_IRQ_ACK BIT(31) #define GVE_IRQ_ACK BIT(31)
......
...@@ -43,6 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = { ...@@ -43,6 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]", "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]", "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]", "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]", "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
...@@ -265,6 +266,9 @@ gve_get_ethtool_stats(struct net_device *netdev, ...@@ -265,6 +266,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss, } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start)); start));
data[i++] = tmp_rx_bytes; data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
/* rx dropped packets */ /* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail + data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail + tmp_rx_buf_alloc_fail +
......
...@@ -1371,14 +1371,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) ...@@ -1371,14 +1371,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
"Could not get device information: err=%d\n", err); "Could not get device information: err=%d\n", err);
goto err; goto err;
} }
if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
priv->dev->max_mtu = PAGE_SIZE;
err = gve_adminq_set_mtu(priv, priv->dev->mtu);
if (err) {
dev_err(&priv->pdev->dev, "Could not set mtu");
goto err;
}
}
priv->dev->mtu = priv->dev->max_mtu; priv->dev->mtu = priv->dev->max_mtu;
num_ntfy = pci_msix_vec_count(priv->pdev); num_ntfy = pci_msix_vec_count(priv->pdev);
if (num_ntfy <= 0) { if (num_ntfy <= 0) {
......
This diff is collapsed.
...@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, ...@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (eop && buf_len <= priv->rx_copybreak) { if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0); &buf_state->page_info, buf_len, 0, NULL);
if (unlikely(!rx->ctx.skb_head)) if (unlikely(!rx->ctx.skb_head))
goto error; goto error;
rx->ctx.skb_tail = rx->ctx.skb_head; rx->ctx.skb_tail = rx->ctx.skb_head;
......
...@@ -50,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) ...@@ -50,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len, struct gve_rx_slot_page_info *page_info, u16 len,
u16 pad) u16 padding, struct gve_rx_ctx *ctx)
{ {
struct sk_buff *skb = napi_alloc_skb(napi, len); void *va = page_info->page_address + padding + page_info->page_offset;
void *va = page_info->page_address + pad + int skb_linear_offset = 0;
page_info->page_offset; bool set_protocol = false;
struct sk_buff *skb;
if (unlikely(!skb))
return NULL; if (ctx) {
if (!ctx->skb_head)
ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
if (unlikely(!ctx->skb_head))
return NULL;
skb = ctx->skb_head;
skb_linear_offset = skb->len;
set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
} else {
skb = napi_alloc_skb(napi, len);
set_protocol = true;
}
__skb_put(skb, len); __skb_put(skb, len);
skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
skb_copy_to_linear_data(skb, va, len); if (set_protocol)
skb->protocol = eth_type_trans(skb, dev);
skb->protocol = eth_type_trans(skb, dev);
return skb; return skb;
} }
......
...@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); ...@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len, struct gve_rx_slot_page_info *page_info, u16 len,
u16 pad); u16 pad, struct gve_rx_ctx *ctx);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */ /* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info); void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment