Commit 0b43cf52 authored by Jeroen de Borst, committed by David S. Miller

gve: Add header split device option

To enable header split via ethtool, we first need to query the device to
get the max rx buffer size and header buffer size. Add a device option
to get these values and store them in the driver. If the header buffer
size received from the device is non-zero, it means header split is
supported by the device.
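
As an illustration (not part of this patch), header split support can then
be detected purely from the stored header buffer size, along the lines of
the sketch below; gve_header_split_supported is a hypothetical helper name,
not something this change adds:

  /* Hypothetical helper: header split is supported when the device
   * reports a non-zero header buffer size via the buffer-sizes option.
   */
  static bool gve_header_split_supported(const struct gve_priv *priv)
  {
  	return priv->header_buf_size != 0;
  }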

Currently, the max rx buffer size is only used when header split is
enabled, in which case data_buffer_size_dqo is set to the max rx buffer
size. Also change data_buffer_size_dqo from int to u16: we are modifying
it here anyway, and this keeps it consistent with max_rx_buffer_size.
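
For reference, the expected interaction (sketched under the assumption that
a follow-up change adds the ethtool toggle; enable_hdr_split is a made-up
local flag, not part of this patch) is roughly:

  /* Sketch: once header split can be toggled, the DQO data buffer
   * size follows the device-reported maximum instead of the default.
   */
  if (enable_hdr_split)
  	priv->data_buffer_size_dqo = priv->max_rx_buffer_size;
  else
  	priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;

The toggle itself is expected to land in the ethtool ring-parameter path
(tcp-data-split) in a later patch.
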
Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6752fb18
@@ -51,12 +51,16 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_MAX_RX_BUFFER_SIZE 4096
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048
@@ -778,13 +782,16 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
int data_buffer_size_dqo;
u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
/* Interrupt coalescing settings */
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
u16 header_buf_size; /* device configured, header-split supported if non-zero */
};
enum gve_service_task_flags_bit {
......
@@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -147,6 +148,23 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_jumbo_frames = (void *)(option + 1);
break;
case GVE_DEV_OPT_ID_BUFFER_SIZES:
if (option_length < sizeof(**dev_op_buffer_sizes) ||
req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
"Buffer Sizes",
(int)sizeof(**dev_op_buffer_sizes),
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
option_length, req_feat_mask);
break;
}
if (option_length > sizeof(**dev_op_buffer_sizes))
dev_warn(&priv->pdev->dev,
GVE_DEVICE_OPTION_TOO_BIG_FMT,
"Buffer Sizes");
*dev_op_buffer_sizes = (void *)(option + 1);
break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -164,7 +182,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -185,7 +204,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl);
dev_op_dqo_qpl, dev_op_buffer_sizes);
dev_opt = next_opt;
}
@@ -755,7 +774,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_jumbo_frames
*dev_op_jumbo_frames,
const struct gve_device_option_dqo_qpl
*dev_op_dqo_qpl)
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
*dev_op_buffer_sizes)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -779,10 +800,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (priv->rx_pages_per_qpl == 0)
priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
if (dev_op_buffer_sizes &&
(supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
priv->max_rx_buffer_size =
be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
priv->header_buf_size =
be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
dev_info(&priv->pdev->dev,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
}
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
@@ -816,7 +849,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames,
&dev_op_dqo_qpl);
&dev_op_dqo_qpl,
&dev_op_buffer_sizes);
if (err)
goto free_device_descriptor;
@@ -885,7 +919,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl);
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
......
@@ -125,6 +125,15 @@ struct gve_device_option_jumbo_frames {
static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
struct gve_device_option_buffer_sizes {
/* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
__be32 supported_features_mask;
__be16 packet_buffer_size;
__be16 header_buffer_size;
};
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -140,6 +149,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
@@ -149,10 +159,12 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -165,6 +177,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
gve_driver_capability_flexible_buffer_size = 5,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -176,7 +189,8 @@ enum gve_driver_capbility {
(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
GVE_CAP1(gve_driver_capability_alt_miss_compl))
GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
GVE_CAP1(gve_driver_capability_flexible_buffer_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -260,7 +274,9 @@ struct gve_adminq_create_rx_queue {
__be16 packet_buffer_size;
__be16 rx_buff_ring_size;
u8 enable_rsc;
u8 padding[5];
u8 padding1;
__be16 header_buffer_size;
u8 padding2[2];
};
static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
......
@@ -1448,12 +1448,6 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto reset;
if (!gve_is_gqi(priv)) {
/* Hard code this for now. This may be tuned in the future for
* performance.
*/
priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
}
err = gve_create_rings(priv);
if (err)
goto reset;
@@ -2511,6 +2505,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
......
@@ -458,7 +458,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
const int data_buffer_size = priv->data_buffer_size_dqo;
const u16 data_buffer_size = priv->data_buffer_size_dqo;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
......