Commit 5dee3c70 authored by Harshitha Ramamurthy, committed by David S. Miller

gve: make the completion and buffer ring size equal for DQO

For the DQO queue format, the gve driver stores two ring sizes
for both TX and RX - one for the completion queue ring and one
for the data buffer ring. This was meant to allow asymmetric
sizes for the two rings, but asymmetric sizes are not supported.
Make both fields reference the same single variable.

This change makes reading the supported TX completion ring size
and RX buffer ring size for DQO from the device pointless, so
change those fields to reserved and remove the related code.
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4cbc70f6
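In short: for DQO, the completion ring and the data buffer ring are now sized from the same descriptor count. A minimal kernel-style C sketch of that invariant (hypothetical names, not the driver's actual structures):

#include <linux/types.h>

/* Minimal sketch, not gve code: both DQO ring sizes derive from the
 * single per-queue descriptor count, so the completion queue index
 * mask is equal to the data ring index mask by construction.
 */
struct sketch_tx_queue {
	u16 desc_cnt;		/* one size for both rings; power of two */
	u32 mask;		/* data/descriptor ring index mask */
	u32 complq_mask;	/* completion ring index mask */
};

static void sketch_tx_init(struct sketch_tx_queue *tx, u16 desc_cnt)
{
	tx->desc_cnt = desc_cnt;
	tx->mask = desc_cnt - 1;
	tx->complq_mask = tx->mask;	/* rings sized equally */
}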
drivers/net/ethernet/google/gve/gve.h
@@ -621,11 +621,6 @@ struct gve_qpl_config {
 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
 };
 
-struct gve_options_dqo_rda {
-	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
-	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
 struct gve_irq_db {
 	__be32 index;
 } ____cacheline_aligned;
@@ -792,7 +787,6 @@ struct gve_priv {
 	u64 link_speed;
 	bool up_before_suspend; /* True if dev was up before suspend */
 
-	struct gve_options_dqo_rda options_dqo_rda;
 	struct gve_ptype_lut *ptype_lut_dqo;
 
 	/* Must be a power of two. */
drivers/net/ethernet/google/gve/gve_adminq.c
@@ -565,6 +565,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 			cpu_to_be64(tx->q_resources_bus),
 		.tx_ring_addr = cpu_to_be64(tx->bus),
 		.ntfy_id = cpu_to_be32(tx->ntfy_id),
+		.tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
 	};
 
 	if (gve_is_gqi(priv)) {
@@ -573,24 +574,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 	} else {
-		u16 comp_ring_size;
 		u32 qpl_id = 0;
 
-		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
 			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
-			comp_ring_size =
-				priv->options_dqo_rda.tx_comp_ring_entries;
-		} else {
+		else
 			qpl_id = tx->dqo.qpl->id;
-			comp_ring_size = priv->tx_desc_cnt;
-		}
 
 		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
-		cmd.create_tx_queue.tx_ring_size =
-			cpu_to_be16(priv->tx_desc_cnt);
 		cmd.create_tx_queue.tx_comp_ring_addr =
 			cpu_to_be64(tx->complq_bus_dqo);
 		cmd.create_tx_queue.tx_comp_ring_size =
-			cpu_to_be16(comp_ring_size);
+			cpu_to_be16(priv->tx_desc_cnt);
 	}
 
 	return gve_adminq_issue_cmd(priv, &cmd);
@@ -621,6 +615,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		.queue_id = cpu_to_be32(queue_index),
 		.ntfy_id = cpu_to_be32(rx->ntfy_id),
 		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+		.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
 	};
 
 	if (gve_is_gqi(priv)) {
@@ -635,20 +630,13 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
 	} else {
-		u16 rx_buff_ring_entries;
 		u32 qpl_id = 0;
 
-		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
 			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
-			rx_buff_ring_entries =
-				priv->options_dqo_rda.rx_buff_ring_entries;
-		} else {
+		else
 			qpl_id = rx->dqo.qpl->id;
-			rx_buff_ring_entries = priv->rx_desc_cnt;
-		}
 		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
-		cmd.create_rx_queue.rx_ring_size =
-			cpu_to_be16(priv->rx_desc_cnt);
 		cmd.create_rx_queue.rx_desc_ring_addr =
 			cpu_to_be64(rx->dqo.complq.bus);
 		cmd.create_rx_queue.rx_data_ring_addr =
@@ -656,7 +644,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		cmd.create_rx_queue.packet_buffer_size =
 			cpu_to_be16(priv->data_buffer_size_dqo);
 		cmd.create_rx_queue.rx_buff_ring_size =
-			cpu_to_be16(rx_buff_ring_entries);
+			cpu_to_be16(priv->rx_desc_cnt);
 		cmd.create_rx_queue.enable_rsc =
 			!!(priv->dev->features & NETIF_F_LRO);
 		if (priv->header_split_enabled)
@@ -746,18 +734,10 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
 }
 
 static void gve_set_default_desc_cnt(struct gve_priv *priv,
-			const struct gve_device_descriptor *descriptor,
-			const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+			const struct gve_device_descriptor *descriptor)
 {
 	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
 	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
-
-	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
-		priv->options_dqo_rda.tx_comp_ring_entries =
-			be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
-		priv->options_dqo_rda.rx_buff_ring_entries =
-			be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
-	}
 }
 
 static void gve_enable_supported_features(struct gve_priv *priv,
@@ -878,7 +858,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	}
 
 	/* set default descriptor counts */
-	gve_set_default_desc_cnt(priv, descriptor, dev_op_dqo_rda);
+	gve_set_default_desc_cnt(priv, descriptor);
 
 	/* DQO supports LRO. */
 	if (!gve_is_gqi(priv))
drivers/net/ethernet/google/gve/gve_adminq.h
@@ -103,8 +103,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
 struct gve_device_option_dqo_rda {
 	__be32 supported_features_mask;
-	__be16 tx_comp_ring_entries;
-	__be16 rx_buff_ring_entries;
+	__be32 reserved;
 };
 
 static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
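Note on the hunk above: replacing the two __be16 fields with a single __be32 reserved keeps struct gve_device_option_dqo_rda at 8 bytes, so the on-wire layout of the device option is unchanged, and the existing static_assert continues to guard that. The same pattern on a hypothetical struct (sketch only, not gve code):

#include <linux/types.h>
#include <linux/build_bug.h>

/* Hypothetical device option: retired fields are replaced with
 * same-sized reserved space so the layout and total size stay
 * fixed; the assert catches any accidental resize.
 */
struct sketch_device_option {
	__be32 supported_features_mask;
	__be32 reserved;	/* was: two __be16 ring-size fields */
};
static_assert(sizeof(struct sketch_device_option) == 8);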
drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -305,8 +305,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	size_t size;
 	int i;
 
-	const u32 buffer_queue_slots = cfg->raw_addressing ?
-		priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+	const u32 buffer_queue_slots = cfg->ring_size;
 	const u32 completion_queue_slots = cfg->ring_size;
 
 	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -295,9 +295,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 	/* Queue sizes must be a power of 2 */
 	tx->mask = cfg->ring_size - 1;
-	tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
-		priv->options_dqo_rda.tx_comp_ring_entries - 1 :
-		tx->mask;
+	tx->dqo.complq_mask = tx->mask;
 
 	/* The max number of pending packets determines the maximum number of
 	 * descriptors which maybe written to the completion queue.
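The "power of 2" comment in the hunk above is what makes mask-based indexing valid: with ring_size == 2^n, wrapping a ring index reduces to a bitwise AND with ring_size - 1. A hedged sketch with a hypothetical helper, not from the gve driver:

#include <linux/types.h>

/* Advance a ring index using the mask (mask == ring_size - 1,
 * valid only for power-of-two sizes) rather than a modulo.
 */
static inline u32 sketch_ring_next(u32 idx, u32 mask)
{
	return (idx + 1) & mask;
}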