Commit 34c58c89 authored by David S. Miller

Merge branch 'gve-ring-size-changes'

Harshitha Ramamurthy says:

====================
gve: enable ring size changes

This series adds support for changing the ring sizes via ethtool
in gve.

The first three patches handle some cleanup and set default
values for the ring sizes and related fields. The last two
patches enable the ring size changes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4c6ce450 834f9458
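
For context, once this series is applied the new ring sizes can be requested through the standard ethtool ring interface. The sketch below is illustrative only and is not part of the series: the interface name "eth0" and the counts 512/1024 are placeholders, and the equivalent command-line invocation is ethtool -G eth0 tx 512 rx 1024.

/*
 * Minimal userspace sketch: read the current/max ring sizes, then
 * request new ones via the classic SIOCETHTOOL ioctl.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr = { 0 };
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* "eth0" is a placeholder interface name. */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ring;

	/* Read current and maximum ring sizes (serviced by gve_get_ringparam). */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRINGPARAM");
		return 1;
	}

	/* Request new sizes; they must lie within the advertised min/max
	 * and, for gve, be powers of two (see gve_validate_req_ring_size()).
	 * The counts 512/1024 are placeholders.
	 */
	ring.cmd = ETHTOOL_SRINGPARAM;
	ring.tx_pending = 512;
	ring.rx_pending = 1024;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SRINGPARAM");
		return 1;
	}

	printf("tx=%u rx=%u (max tx=%u rx=%u)\n",
	       ring.tx_pending, ring.rx_pending,
	       ring.tx_max_pending, ring.rx_max_pending);
	close(fd);
	return 0;
}
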
@@ -50,6 +50,10 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
/* Default minimum ring size */
#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_MAX_RX_BUFFER_SIZE 4096
@@ -63,7 +67,6 @@
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
@@ -621,11 +624,6 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
struct gve_options_dqo_rda {
u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};
struct gve_irq_db {
__be32 index;
} ____cacheline_aligned;
@@ -718,9 +716,13 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
u16 max_tx_desc_cnt;
u16 max_rx_desc_cnt;
u16 min_tx_desc_cnt;
u16 min_rx_desc_cnt;
bool modify_ring_size_enabled;
bool default_min_ring_size;
u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -792,7 +794,6 @@ struct gve_priv {
u64 link_speed;
bool up_before_suspend; /* True if dev was up before suspend */
struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
@@ -1044,6 +1045,14 @@ static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
return gve_get_rx_qpl_id(tx_cfg, 0);
}
static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
/* For DQO, the page count should be more than the ring size to allow for
 * out-of-order completions. Set it to twice the ring size.
 */
return 2 * rx_desc_cnt;
}
/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
@@ -1150,6 +1159,14 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
@@ -103,8 +103,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
__be16 tx_comp_ring_entries;
__be16 rx_buff_ring_entries;
__be32 reserved;
};
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
@@ -134,6 +133,16 @@ struct gve_device_option_buffer_sizes {
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
struct gve_device_option_modify_ring {
__be32 supported_featured_mask;
__be16 max_rx_ring_size;
__be16 max_tx_ring_size;
__be16 min_rx_ring_size;
__be16 min_tx_ring_size;
};
static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -143,28 +152,31 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
* the device for read/write and data is copied from/to SKBs.
*/
enum gve_dev_opt_id {
GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
GVE_DEV_OPT_ID_GQI_RDA = 0x2,
GVE_DEV_OPT_ID_GQI_QPL = 0x3,
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
GVE_DEV_OPT_ID_GQI_RDA = 0x2,
GVE_DEV_OPT_ID_GQI_QPL = 0x3,
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_MODIFY_RING_MASK = 1 << 0,
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -490,8 +490,8 @@ static void gve_get_ringparam(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
cmd->rx_max_pending = priv->rx_desc_cnt;
cmd->tx_max_pending = priv->tx_desc_cnt;
cmd->rx_max_pending = priv->max_rx_desc_cnt;
cmd->tx_max_pending = priv->max_tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
@@ -503,20 +503,93 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static int gve_adjust_ring_sizes(struct gve_priv *priv,
u16 new_tx_desc_cnt,
u16 new_rx_desc_cnt)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_qpl_config new_qpl_cfg;
int err;
/* get current queue configuration */
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
/* copy over the new ring_size from ethtool */
tx_alloc_cfg.ring_size = new_tx_desc_cnt;
rx_alloc_cfg.ring_size = new_rx_desc_cnt;
/* qpl_cfg is not read-only, it contains a map that gets updated as
* rings are allocated, which is why we cannot use the yet unreleased
* one in priv.
*/
qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
}
/* Set new ring_size for the next up */
priv->tx_desc_cnt = new_tx_desc_cnt;
priv->rx_desc_cnt = new_rx_desc_cnt;
return 0;
}
static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
u16 new_rx_desc_cnt)
{
/* check for valid range */
if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
new_tx_desc_cnt > priv->max_tx_desc_cnt ||
new_rx_desc_cnt < priv->min_rx_desc_cnt ||
new_rx_desc_cnt > priv->max_rx_desc_cnt) {
dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
return -EINVAL;
}
if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
return -EINVAL;
}
return 0;
}
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
u16 new_tx_cnt, new_rx_cnt;
int err;
err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
if (err)
return err;
if (priv->tx_desc_cnt != cmd->tx_pending ||
priv->rx_desc_cnt != cmd->rx_pending) {
dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
return 0;
if (!priv->modify_ring_size_enabled) {
dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
return -EOPNOTSUPP;
}
return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
new_tx_cnt = cmd->tx_pending;
new_rx_cnt = cmd->rx_pending;
if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
return -EINVAL;
return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -1103,13 +1103,13 @@ static int gve_alloc_n_qpls(struct gve_priv *priv,
return err;
}
static int gve_alloc_qpls(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *cfg)
static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
int rx_start_id, tx_num_qpls, rx_num_qpls;
struct gve_queue_page_list *qpls;
int page_count;
u32 page_count;
int err;
if (cfg->raw_addressing)
@@ -1141,8 +1141,12 @@ static int gve_alloc_qpls(struct gve_priv *priv,
/* For GQI_QPL, the number of pages allocated has a 1:1 relationship with
 * the number of descriptors. For DQO, more pages than descriptors are
 * required (because of out-of-order completions).
 * Set it to twice the number of descriptors.
 */
page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
if (cfg->is_gqi)
page_count = rx_alloc_cfg->ring_size;
else
page_count = gve_get_rx_pages_per_qpl_dqo(rx_alloc_cfg->ring_size);
rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
if (err)
@@ -1310,10 +1314,10 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->rx = priv->rx;
}
static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
@@ -1363,7 +1367,7 @@ static int gve_queues_mem_alloc(struct gve_priv *priv,
{
int err;
err = gve_alloc_qpls(priv, qpls_alloc_cfg);
err = gve_alloc_qpls(priv, qpls_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
return err;
@@ -1863,10 +1867,10 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
static int gve_adjust_config(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
int gve_adjust_config(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
@@ -240,7 +240,7 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
u32 slots = priv->rx_data_slot_cnt;
u32 slots = cfg->ring_size;
int filled_pages;
size_t bytes;
int err;
@@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return err;
} else {
idx = rx->dqo.next_qpl_page_idx;
if (idx >= priv->rx_pages_per_qpl) {
if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
@@ -305,8 +305,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
size_t size;
int i;
const u32 buffer_queue_slots = cfg->raw_addressing ?
priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -322,7 +321,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
priv->rx_pages_per_qpl;
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL);
@@ -295,9 +295,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
/* Queue sizes must be a power of 2 */
tx->mask = cfg->ring_size - 1;
tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
priv->options_dqo_rda.tx_comp_ring_entries - 1 :
tx->mask;
tx->dqo.complq_mask = tx->mask;
/* The max number of pending packets determines the maximum number of
* descriptors which may be written to the completion queue.