Commit 19b427a4 authored by David S. Miller

Merge branch 'net-gve-header-split-support'

Ziwei Xiao says:

====================
gve: Add header split support

Ethtool's ringparam interface now includes a tcp-data-split field for
enabling and disabling header split. These three patches use that flag
to add header split support to the GVE driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6752fb18 056a7092
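For context, here is a minimal sketch of how a driver wires the tcp-data-split
ringparam through struct kernel_ethtool_ringparam, following the same pattern
the gve_ethtool.c hunks below use. The ex_* names are hypothetical and the
bodies are placeholders, not the gve implementation; with a recent ethtool,
userspace would toggle the setting via "ethtool -G <dev> tcp-data-split on|off".

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void ex_get_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	/* Report the current state: UNKNOWN when the device cannot split,
	 * otherwise ENABLED/DISABLED based on driver state.
	 */
	kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

static int ex_set_ringparam(struct net_device *netdev,
			    struct ethtool_ringparam *cmd,
			    struct kernel_ethtool_ringparam *kernel_cmd,
			    struct netlink_ext_ack *extack)
{
	/* kernel_cmd->tcp_data_split carries the requested value
	 * (ETHTOOL_TCP_DATA_SPLIT_ENABLED/DISABLED/UNKNOWN); a real driver
	 * applies it here or returns an error such as -EOPNOTSUPP.
	 */
	return 0;
}

static const struct ethtool_ops ex_ethtool_ops = {
	/* Required so the ethtool core accepts tcp-data-split in set_ringparam */
	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_ringparam		= ex_get_ringparam,
	.set_ringparam		= ex_set_ringparam,
};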
@@ -9,6 +9,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
+#include <linux/ethtool_netlink.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/u64_stats_sync.h>
@@ -51,12 +52,16 @@
 #define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+#define GVE_MAX_RX_BUFFER_SIZE 4096
 #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
 #define GVE_XDP_ACTIONS 5
 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
 #define DQO_QPL_DEFAULT_TX_PAGES 512
 #define DQO_QPL_DEFAULT_RX_PAGES 2048
@@ -150,6 +155,11 @@ struct gve_rx_compl_queue_dqo {
 	u32 mask; /* Mask for indices to the size of the ring */
 };
+struct gve_header_buf {
+	u8 *data;
+	dma_addr_t addr;
+};
+
 /* Stores state for tracking buffers posted to HW */
 struct gve_rx_buf_state_dqo {
 	/* The page posted to HW. */
@@ -252,19 +262,26 @@ struct gve_rx_ring {
 			/* track number of used buffers */
 			u16 used_buf_states_cnt;
+
+			/* Address info of the buffers for header-split */
+			struct gve_header_buf hdr_bufs;
 		} dqo;
 	};
 	u64 rbytes; /* free-running bytes received */
+	u64 rx_hsplit_bytes; /* free-running header bytes received */
 	u64 rpackets; /* free-running packets received */
 	u32 cnt; /* free-running total number of completed packets */
 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+	u64 rx_hsplit_pkt; /* free-running packets with headers split */
 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
 	u64 rx_copied_pkt; /* free-running total number of copied packets */
 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+	/* free-running count of unsplit packets due to header buffer overflow or hdr_len is 0 */
+	u64 rx_hsplit_unsplit_pkt;
 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
@@ -664,6 +681,7 @@ struct gve_rx_alloc_rings_cfg {
 	struct gve_qpl_config *qpl_cfg;
 	u16 ring_size;
+	u16 packet_buffer_size;
 	bool raw_addressing;
 	bool enable_header_split;
@@ -778,13 +796,17 @@ struct gve_priv {
 	struct gve_ptype_lut *ptype_lut_dqo;
 	/* Must be a power of two. */
-	int data_buffer_size_dqo;
+	u16 data_buffer_size_dqo;
+	u16 max_rx_buffer_size; /* device limit */
 	enum gve_queue_format queue_format;
 	/* Interrupt coalescing settings */
 	u32 tx_coalesce_usecs;
 	u32 rx_coalesce_usecs;
+
+	u16 header_buf_size; /* device configured, header-split supported if non-zero */
+	bool header_split_enabled; /* True if the header split is enabled by the user */
 };
 enum gve_service_task_flags_bit {
@@ -1122,6 +1144,9 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
 			   struct gve_rx_alloc_rings_cfg *cfg);
 void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
 void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
+bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
...
@@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
 			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
 			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
-			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
 {
 	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
 	u16 option_length = be16_to_cpu(option->option_length);
@@ -147,6 +148,23 @@ void gve_parse_device_option(struct gve_priv *priv,
 		}
 		*dev_op_jumbo_frames = (void *)(option + 1);
 		break;
+	case GVE_DEV_OPT_ID_BUFFER_SIZES:
+		if (option_length < sizeof(**dev_op_buffer_sizes) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "Buffer Sizes",
+				 (int)sizeof(**dev_op_buffer_sizes),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_buffer_sizes))
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
+				 "Buffer Sizes");
+		*dev_op_buffer_sizes = (void *)(option + 1);
+		break;
 	default:
 		/* If we don't recognize the option just continue
 		 * without doing anything.
@@ -164,7 +182,8 @@ gve_process_device_options(struct gve_priv *priv,
 			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
 			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
 			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
-			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
 {
 	const int num_options = be16_to_cpu(descriptor->num_device_options);
 	struct gve_device_option *dev_opt;
@@ -185,7 +204,7 @@ gve_process_device_options(struct gve_priv *priv,
 		gve_parse_device_option(priv, descriptor, dev_opt,
 					dev_op_gqi_rda, dev_op_gqi_qpl,
 					dev_op_dqo_rda, dev_op_jumbo_frames,
-					dev_op_dqo_qpl);
+					dev_op_dqo_qpl, dev_op_buffer_sizes);
 		dev_opt = next_opt;
 	}
@@ -640,6 +659,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 			cpu_to_be16(rx_buff_ring_entries);
 		cmd.create_rx_queue.enable_rsc =
 			!!(priv->dev->features & NETIF_F_LRO);
+		if (priv->header_split_enabled)
+			cmd.create_rx_queue.header_buffer_size =
+				cpu_to_be16(priv->header_buf_size);
 	}
 	return gve_adminq_issue_cmd(priv, &cmd);
@@ -755,7 +777,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 					  const struct gve_device_option_jumbo_frames
 					  *dev_op_jumbo_frames,
 					  const struct gve_device_option_dqo_qpl
-					  *dev_op_dqo_qpl)
+					  *dev_op_dqo_qpl,
+					  const struct gve_device_option_buffer_sizes
+					  *dev_op_buffer_sizes)
 {
 	/* Before control reaches this point, the page-size-capped max MTU from
 	 * the gve_device_descriptor field has already been stored in
@@ -779,10 +803,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 		if (priv->rx_pages_per_qpl == 0)
 			priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
 	}
+
+	if (dev_op_buffer_sizes &&
+	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
+		priv->max_rx_buffer_size =
+			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
+		priv->header_buf_size =
+			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
+		dev_info(&priv->pdev->dev,
+			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
+			 priv->max_rx_buffer_size, priv->header_buf_size);
+	}
 }
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
 	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
 	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
 	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
@@ -816,7 +852,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
 					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
 					 &dev_op_jumbo_frames,
-					 &dev_op_dqo_qpl);
+					 &dev_op_dqo_qpl,
+					 &dev_op_buffer_sizes);
 	if (err)
 		goto free_device_descriptor;
@@ -885,7 +922,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 	gve_enable_supported_features(priv, supported_features_mask,
-				      dev_op_jumbo_frames, dev_op_dqo_qpl);
+				      dev_op_jumbo_frames, dev_op_dqo_qpl,
+				      dev_op_buffer_sizes);
 free_device_descriptor:
 	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
...
@@ -125,6 +125,15 @@ struct gve_device_option_jumbo_frames {
 static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+struct gve_device_option_buffer_sizes {
+	/* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
+	__be32 supported_features_mask;
+	__be16 packet_buffer_size;
+	__be16 header_buffer_size;
+};
+
+static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+
 /* Terminology:
  *
  * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -140,6 +149,7 @@ enum gve_dev_opt_id {
 	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
 	GVE_DEV_OPT_ID_DQO_QPL = 0x7,
 	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+	GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
 };
 enum gve_dev_opt_req_feat_mask {
@@ -149,10 +159,12 @@ enum gve_dev_opt_req_feat_mask {
 	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
 };
 enum gve_sup_feature_mask {
 	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+	GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
 };
 #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -165,6 +177,7 @@ enum gve_driver_capbility {
 	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
 	gve_driver_capability_dqo_rda = 3,
 	gve_driver_capability_alt_miss_compl = 4,
+	gve_driver_capability_flexible_buffer_size = 5,
 };
 #define GVE_CAP1(a) BIT((int)a)
@@ -176,7 +189,8 @@ enum gve_driver_capbility {
 	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
 	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
 	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
-	 GVE_CAP1(gve_driver_capability_alt_miss_compl))
+	 GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
+	 GVE_CAP1(gve_driver_capability_flexible_buffer_size))
 #define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
 #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -260,7 +274,9 @@ struct gve_adminq_create_rx_queue {
 	__be16 packet_buffer_size;
 	__be16 rx_buff_ring_size;
 	u8 enable_rsc;
-	u8 padding[5];
+	u8 padding1;
+	__be16 header_buffer_size;
+	u8 padding2[2];
 };
 static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
...
@@ -4,7 +4,6 @@
  * Copyright (C) 2015-2021 Google, Inc.
  */
-#include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include "gve.h"
 #include "gve_adminq.h"
@@ -40,17 +39,18 @@ static u32 gve_get_msglevel(struct net_device *netdev)
  * as declared in enum xdp_action inside file uapi/linux/bpf.h .
  */
 static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
-	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
-	"rx_dropped", "tx_dropped", "tx_timeouts",
+	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
+	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
 	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+	"rx_hsplit_unsplit_pkt",
 	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
 	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
 };
 static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
-	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
-	"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
-	"rx_frag_alloc_cnt[%u]",
+	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
+	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
+	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
 	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
 	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
 	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -154,11 +154,13 @@ static void
 gve_get_ethtool_stats(struct net_device *netdev,
 		      struct ethtool_stats *stats, u64 *data)
 {
-	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
-	    tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
+	    tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+	    tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
 	    tmp_tx_pkts, tmp_tx_bytes;
-	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
-	    rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
+	    rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
+	    tx_dropped;
 	int stats_idx, base_stats_idx, max_stats_idx;
 	struct stats *report_stats;
 	int *rx_qid_to_stats_idx;
@@ -185,8 +187,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
 		kfree(rx_qid_to_stats_idx);
 		return;
 	}
-	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
-	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
+	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+	     ring = 0;
 	     ring < priv->rx_cfg.num_queues; ring++) {
 		if (priv->rx) {
 			do {
@@ -195,18 +199,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
 				start =
 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
 				tmp_rx_pkts = rx->rpackets;
+				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
 				tmp_rx_bytes = rx->rbytes;
 				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
 				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
 				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
+				tmp_rx_hsplit_unsplit_pkt =
+					rx->rx_hsplit_unsplit_pkt;
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
 			rx_pkts += tmp_rx_pkts;
+			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
 			rx_bytes += tmp_rx_bytes;
 			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
 			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
 			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
+			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
 		}
 	}
 	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
@@ -227,6 +236,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	i = 0;
 	data[i++] = rx_pkts;
+	data[i++] = rx_hsplit_pkt;
 	data[i++] = tx_pkts;
 	data[i++] = rx_bytes;
 	data[i++] = tx_bytes;
@@ -238,6 +248,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	data[i++] = rx_skb_alloc_fail;
 	data[i++] = rx_buf_alloc_fail;
 	data[i++] = rx_desc_err_dropped_pkt;
+	data[i++] = rx_hsplit_unsplit_pkt;
 	data[i++] = priv->interface_up_cnt;
 	data[i++] = priv->interface_down_cnt;
 	data[i++] = priv->reset_cnt;
@@ -277,6 +288,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 				start =
 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
 				tmp_rx_bytes = rx->rbytes;
+				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
 				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
 				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
 				tmp_rx_desc_err_dropped_pkt =
@@ -284,6 +296,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
 			data[i++] = tmp_rx_bytes;
+			data[i++] = tmp_rx_hsplit_bytes;
 			data[i++] = rx->rx_cont_packet_cnt;
 			data[i++] = rx->rx_frag_flip_cnt;
 			data[i++] = rx->rx_frag_copy_cnt;
@@ -480,6 +493,29 @@ static void gve_get_ringparam(struct net_device *netdev,
 	cmd->tx_max_pending = priv->tx_desc_cnt;
 	cmd->rx_pending = priv->rx_desc_cnt;
 	cmd->tx_pending = priv->tx_desc_cnt;
+
+	if (!gve_header_split_supported(priv))
+		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+	else if (priv->header_split_enabled)
+		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+	else
+		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+static int gve_set_ringparam(struct net_device *netdev,
+			     struct ethtool_ringparam *cmd,
+			     struct kernel_ethtool_ringparam *kernel_cmd,
+			     struct netlink_ext_ack *extack)
+{
+	struct gve_priv *priv = netdev_priv(netdev);
+
+	if (priv->tx_desc_cnt != cmd->tx_pending ||
+	    priv->rx_desc_cnt != cmd->rx_pending) {
+		dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+		return -EOPNOTSUPP;
+	}
+
+	return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
 }
 static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -655,6 +691,7 @@ static int gve_set_coalesce(struct net_device *netdev,
 const struct ethtool_ops gve_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
 	.get_drvinfo = gve_get_drvinfo,
 	.get_strings = gve_get_strings,
 	.get_sset_count = gve_get_sset_count,
@@ -667,6 +704,7 @@ const struct ethtool_ops gve_ethtool_ops = {
 	.get_coalesce = gve_get_coalesce,
 	.set_coalesce = gve_set_coalesce,
 	.get_ringparam = gve_get_ringparam,
+	.set_ringparam = gve_set_ringparam,
 	.reset = gve_user_reset,
 	.get_tunable = gve_get_tunable,
 	.set_tunable = gve_set_tunable,
...
@@ -1307,9 +1307,13 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
 	cfg->qcfg = &priv->rx_cfg;
 	cfg->qcfg_tx = &priv->tx_cfg;
 	cfg->raw_addressing = !gve_is_qpl(priv);
+	cfg->enable_header_split = priv->header_split_enabled;
 	cfg->qpls = priv->qpls;
 	cfg->qpl_cfg = &priv->qpl_cfg;
 	cfg->ring_size = priv->rx_desc_cnt;
+	cfg->packet_buffer_size = gve_is_gqi(priv) ?
+				  GVE_DEFAULT_RX_BUFFER_SIZE :
+				  priv->data_buffer_size_dqo;
 	cfg->rx = priv->rx;
 }
@@ -1448,12 +1452,9 @@ static int gve_queues_start(struct gve_priv *priv,
 	if (err)
 		goto reset;
-	if (!gve_is_gqi(priv)) {
-		/* Hard code this for now. This may be tuned in the future for
-		 * performance.
-		 */
-		priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
-	}
+	priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
+	priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+
 	err = gve_create_rings(priv);
 	if (err)
 		goto reset;
@@ -2065,6 +2066,56 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
 	priv->tx_timeo_cnt++;
 }
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+{
+	if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
+		return GVE_MAX_RX_BUFFER_SIZE;
+	else
+		return GVE_DEFAULT_RX_BUFFER_SIZE;
+}
+
+/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
+bool gve_header_split_supported(const struct gve_priv *priv)
+{
+	return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+}
+
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+{
+	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+	bool enable_hdr_split;
+	int err = 0;
+
+	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+		return 0;
+
+	if (!gve_header_split_supported(priv)) {
+		dev_err(&priv->pdev->dev, "Header-split not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+		enable_hdr_split = true;
+	else
+		enable_hdr_split = false;
+
+	if (enable_hdr_split == priv->header_split_enabled)
+		return 0;
+
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+
+	rx_alloc_cfg.enable_header_split = enable_hdr_split;
+	rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+
+	if (netif_running(priv->dev))
+		err = gve_adjust_config(priv, &qpls_alloc_cfg,
+					&tx_alloc_cfg, &rx_alloc_cfg);
+	return err;
+}
+
 static int gve_set_features(struct net_device *netdev,
 			    netdev_features_t features)
 {
@@ -2511,6 +2562,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	priv->service_task_flags = 0x0;
 	priv->state_flags = 0x0;
 	priv->ethtool_flags = 0x0;
+	priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+	priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
 	gve_set_probe_in_progress(priv);
 	priv->gve_wq = alloc_ordered_workqueue("gve", 0);
...
@@ -199,6 +199,18 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
 	return 0;
 }
+static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+	struct device *hdev = &priv->pdev->dev;
+	int buf_count = rx->dqo.bufq.mask + 1;
+
+	if (rx->dqo.hdr_bufs.data) {
+		dma_free_coherent(hdev, priv->header_buf_size * buf_count,
+				  rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
+		rx->dqo.hdr_bufs.data = NULL;
+	}
+}
+
 void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -258,9 +270,24 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
 	kvfree(rx->dqo.buf_states);
 	rx->dqo.buf_states = NULL;
+	gve_rx_free_hdr_bufs(priv, rx);
+
 	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
 }
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+	struct device *hdev = &priv->pdev->dev;
+	int buf_count = rx->dqo.bufq.mask + 1;
+
+	rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
+						   &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
+	if (!rx->dqo.hdr_bufs.data)
+		return -ENOMEM;
+
+	return 0;
+}
+
 void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
 {
 	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -302,6 +329,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	if (!rx->dqo.buf_states)
 		return -ENOMEM;
+	/* Allocate header buffers for header-split */
+	if (cfg->enable_header_split)
+		if (gve_rx_alloc_hdr_bufs(priv, rx))
+			goto err;
+
 	/* Set up linked list of buffer IDs */
 	for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
 		rx->dqo.buf_states[i].next = i + 1;
@@ -443,6 +475,10 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
 		desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
 		desc->buf_addr = cpu_to_le64(buf_state->addr +
					     buf_state->page_info.page_offset);
+		if (rx->dqo.hdr_bufs.data)
+			desc->header_buf_addr =
+				cpu_to_le64(rx->dqo.hdr_bufs.addr +
+					    priv->header_buf_size * bufq->tail);
 		bufq->tail = (bufq->tail + 1) & bufq->mask;
 		complq->num_free_slots--;
@@ -458,7 +494,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
 static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state)
 {
-	const int data_buffer_size = priv->data_buffer_size_dqo;
+	const u16 data_buffer_size = priv->data_buffer_size_dqo;
 	int pagecount;
 	/* Can't reuse if we only fit one buffer per page */
@@ -645,13 +681,16 @@ static int gve_rx_append_frags(struct napi_struct *napi,
  */
 static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
		      const struct gve_rx_compl_desc_dqo *compl_desc,
-		      int queue_idx)
+		      u32 desc_idx, int queue_idx)
 {
 	const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+	const bool hbo = compl_desc->header_buffer_overflow;
 	const bool eop = compl_desc->end_of_packet != 0;
+	const bool hsplit = compl_desc->split_header;
 	struct gve_rx_buf_state_dqo *buf_state;
 	struct gve_priv *priv = rx->gve;
 	u16 buf_len;
+	u16 hdr_len;
 	if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
 		net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
@@ -672,12 +711,35 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	}
 	buf_len = compl_desc->packet_len;
+	hdr_len = compl_desc->header_len;
 	/* Page might have not been used for awhile and was likely last written
	 * by a different thread.
	 */
 	prefetch(buf_state->page_info.page);
+	/* Copy the header into the skb in the case of header split */
+	if (hsplit) {
+		int unsplit = 0;
+
+		if (hdr_len && !hbo) {
+			rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+							    rx->dqo.hdr_bufs.data +
+							    desc_idx * priv->header_buf_size,
+							    hdr_len);
+			if (unlikely(!rx->ctx.skb_head))
+				goto error;
+			rx->ctx.skb_tail = rx->ctx.skb_head;
+		} else {
+			unsplit = 1;
+		}
+		u64_stats_update_begin(&rx->statss);
+		rx->rx_hsplit_pkt++;
+		rx->rx_hsplit_unsplit_pkt += unsplit;
+		rx->rx_hsplit_bytes += hdr_len;
+		u64_stats_update_end(&rx->statss);
+	}
+
 	/* Sync the portion of dma buffer for CPU to read. */
 	dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
				      buf_state->page_info.page_offset,
@@ -820,7 +882,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 		/* Do not read data until we own the descriptor */
 		dma_rmb();
-		err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+		err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
 		if (err < 0) {
 			gve_rx_free_skb(rx);
 			u64_stats_update_begin(&rx->statss);
...
@@ -64,11 +64,9 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
 	rx->ntfy_id = ntfy_idx;
 }
-struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
-			    struct gve_rx_slot_page_info *page_info, u16 len)
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+				 u8 *data, u16 len)
 {
-	void *va = page_info->page_address + page_info->page_offset +
-		   page_info->pad;
 	struct sk_buff *skb;
 	skb = napi_alloc_skb(napi, len);
@@ -76,12 +74,21 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
 		return NULL;
 	__skb_put(skb, len);
-	skb_copy_to_linear_data_offset(skb, 0, va, len);
+	skb_copy_to_linear_data_offset(skb, 0, data, len);
 	skb->protocol = eth_type_trans(skb, dev);
 	return skb;
 }
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+			    struct gve_rx_slot_page_info *page_info, u16 len)
+{
+	void *va = page_info->page_address + page_info->page_offset +
+		   page_info->pad;
+
+	return gve_rx_copy_data(dev, napi, va, len);
+}
+
 void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
 {
 	page_info->pagecnt_bias--;
...
@@ -19,6 +19,9 @@ bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx);
 void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
 void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+				 u8 *data, u16 len);
+
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len);
...