Commit b94d3703 authored by Harshitha Ramamurthy, committed by David S. Miller

gve: set page count for RX QPL for GQI and DQO queue formats

Fulfill the requirement that for GQI, the number of pages per
RX QPL equals the ring size, by setting the page count directly
to the ring size. With this change, the rx_data_slot_cnt and
rx_pages_per_qpl fields stored in the priv structure are no
longer needed, so remove their usage. For DQO, the number of
pages per RX QPL must exceed the ring size to account for
out-of-order completions, so set it to twice the RX ring size.
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5dee3c70
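Before the diff, a minimal standalone sketch of the page-count policy this
patch lands on (hypothetical names; the real driver reads the ring size from
its rx config structs and uses the gve_get_rx_pages_per_qpl_dqo() helper
added below):

#include <stdint.h>

/* GQI maps QPL pages 1:1 to descriptors, so the page count is the
 * ring size. DQO must tolerate out-of-order completions, so it
 * provisions twice the ring size, mirroring gve_get_rx_pages_per_qpl_dqo().
 */
static uint32_t rx_qpl_page_count(int is_gqi, uint32_t ring_size)
{
	return is_gqi ? ring_size : 2 * ring_size;
}

For example, a ring of 1024 descriptors yields 1024 pages for GQI and 2048
for DQO, the latter matching the DQO_QPL_DEFAULT_RX_PAGES value of 2048 that
this patch removes.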
@@ -63,7 +63,6 @@
 #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
 
 #define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
 
 /* Maximum TSO size supported on DQO */
 #define GVE_DQO_TX_MAX 0x3FFFF
@@ -714,8 +713,6 @@ struct gve_priv {
 	u16 tx_desc_cnt; /* num desc per ring */
 	u16 rx_desc_cnt; /* num desc per ring */
 	u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
-	u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
-	u16 rx_data_slot_cnt; /* rx buffer length */
 	u64 max_registered_pages;
 	u64 num_registered_pages; /* num pages registered with NIC */
 	struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -1038,6 +1035,14 @@ static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
 	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
 
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
+{
+	/* For DQO, page count should be more than ring size for
+	 * out-of-order completions. Set it to two times of ring size.
+	 */
+	return 2 * rx_desc_cnt;
+}
+
 /* Returns a pointer to the next available tx qpl in the list of qpls */
 static inline
 struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
@@ -764,12 +764,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 	if (dev_op_dqo_qpl) {
 		priv->tx_pages_per_qpl =
 			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
-		priv->rx_pages_per_qpl =
-			be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
 		if (priv->tx_pages_per_qpl == 0)
 			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
-		if (priv->rx_pages_per_qpl == 0)
-			priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
 	}
 
 	if (dev_op_buffer_sizes &&
@@ -878,13 +874,6 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	mac = descriptor->mac;
 	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
 	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
-	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
-		dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
-			priv->rx_data_slot_cnt);
-		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
-	}
-
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
 	gve_enable_supported_features(priv, supported_features_mask,
@@ -1103,13 +1103,13 @@ static int gve_alloc_n_qpls(struct gve_priv *priv,
 	return err;
 }
 
-static int gve_alloc_qpls(struct gve_priv *priv,
-			  struct gve_qpls_alloc_cfg *cfg)
+static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
+			  struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
 {
 	int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
 	int rx_start_id, tx_num_qpls, rx_num_qpls;
 	struct gve_queue_page_list *qpls;
-	int page_count;
+	u32 page_count;
 	int err;
 
 	if (cfg->raw_addressing)
@@ -1141,8 +1141,12 @@ static int gve_alloc_qpls(struct gve_priv *priv,
 	/* For GQI_QPL number of pages allocated have 1:1 relationship with
 	 * number of descriptors. For DQO, number of pages required are
 	 * more than descriptors (because of out of order completions).
+	 * Set it to twice the number of descriptors.
 	 */
-	page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
+	if (cfg->is_gqi)
+		page_count = rx_alloc_cfg->ring_size;
+	else
+		page_count = gve_get_rx_pages_per_qpl_dqo(rx_alloc_cfg->ring_size);
 	rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
 	err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
 	if (err)
@@ -1363,7 +1367,7 @@ static int gve_queues_mem_alloc(struct gve_priv *priv,
 {
 	int err;
 
-	err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+	err = gve_alloc_qpls(priv, qpls_alloc_cfg, rx_alloc_cfg);
 	if (err) {
 		netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
 		return err;
@@ -240,7 +240,7 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
 			      int idx)
 {
 	struct device *hdev = &priv->pdev->dev;
-	u32 slots = priv->rx_data_slot_cnt;
+	u32 slots = cfg->ring_size;
 	int filled_pages;
 	size_t bytes;
 	int err;
@@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
 		return err;
 	} else {
 		idx = rx->dqo.next_qpl_page_idx;
-		if (idx >= priv->rx_pages_per_qpl) {
+		if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
 			net_err_ratelimited("%s: Out of QPL pages\n",
 					    priv->dev->name);
 			return -ENOMEM;
@@ -321,7 +321,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 	rx->dqo.num_buf_states = cfg->raw_addressing ?
 		min_t(s16, S16_MAX, buffer_queue_slots * 4) :
-		priv->rx_pages_per_qpl;
+		gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
 	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
 				      sizeof(rx->dqo.buf_states[0]),
 				      GFP_KERNEL);
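As context for the final hunk, a standalone sketch (hypothetical names;
S16_MAX_VAL stands in for the kernel's S16_MAX) of how the DQO buffer-state
count is derived in the two addressing modes after this patch:

#include <stdint.h>

#define S16_MAX_VAL 32767 /* stand-in for the kernel's S16_MAX */

/* Raw addressing sizes buf_states at 4x the buffer queue slots,
 * clamped to S16_MAX; QPL mode needs one state per QPL page, which
 * after this patch is twice the ring size (assumed to fit in s16
 * for realistic ring sizes).
 */
static int16_t dqo_num_buf_states(int raw_addressing,
				  uint32_t buffer_queue_slots,
				  uint32_t ring_size)
{
	if (raw_addressing) {
		uint32_t n = buffer_queue_slots * 4;
		return n > S16_MAX_VAL ? S16_MAX_VAL : (int16_t)n;
	}
	return (int16_t)(2 * ring_size);
}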