Commit 31ce6cee authored by David S. Miller

Merge branch 'mlx4_en-num-of-rings'

Tariq Toukan says:

====================
mlx4_en num of rings

This patchset from Inbar contains changes to the ring controls
of the mlx4 Eth driver.

Patches 1 and 2 limit the number of rings to the number of CPUs.
Patch 3 removes a limitation in the logic that sets the default number of RX rings.

Series generated against net-next commit:
812b5ca7 Add a driver for Renesas uPD60620 and uPD60620A PHYs
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 59d43876 80a8dc75
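For reference, a minimal userspace sketch (not driver code) of the capping rule that patches 1 and 2 introduce: the maximum RX channels reported through ethtool become the smaller of the online CPU count and the driver cap, instead of the bare cap. The MAX_RX_RINGS value of 128 is assumed here for illustration.

#include <stdio.h>
#include <unistd.h>

/* Stand-in for the driver constant; the value is assumed for illustration. */
#define MAX_RX_RINGS 128

/* Mirrors the new mlx4_en_get_max_num_rx_rings(): advertise at most
 * as many RX channels as there are online CPUs. */
static int max_num_rx_rings(void)
{
        long cpus = sysconf(_SC_NPROCESSORS_ONLN);

        return cpus < MAX_RX_RINGS ? (int)cpus : MAX_RX_RINGS;
}

int main(void)
{
        printf("max RX channels advertised: %d\n", max_num_rx_rings());
        return 0;
}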
@@ -1742,13 +1742,18 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	return err;
 }
 
+static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
+{
+	return min_t(int, num_online_cpus(), MAX_RX_RINGS);
+}
+
 static void mlx4_en_get_channels(struct net_device *dev,
 				 struct ethtool_channels *channel)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	channel->max_rx = MAX_RX_RINGS;
-	channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
+	channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
+	channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
 
 	channel->rx_count = priv->rx_ring_num;
 	channel->tx_count = priv->tx_ring_num[TX] /
@@ -1777,7 +1782,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
 	if (channel->tx_count * priv->prof->num_up + xdp_count >
-	    MAX_TX_RINGS) {
+	    priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
 		err = -EINVAL;
 		en_err(priv,
 		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
......
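A hedged sketch of the reworked validity check in mlx4_en_set_channels() above: the requested TX rings across all user priorities, plus any XDP TX rings, must now fit under the per-UP profile maximum times the number of UPs (the old check compared against the fixed MAX_TX_RINGS). The sample numbers below are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the new bound used when validating an ethtool channel request:
 * tx_count rings per UP, num_up UPs, plus xdp_count XDP TX rings,
 * must not exceed max_num_tx_rings_p_up * num_up. */
static bool tx_request_fits(int tx_count, int num_up, int xdp_count,
                            int max_num_tx_rings_p_up)
{
        return tx_count * num_up + xdp_count <=
               max_num_tx_rings_p_up * num_up;
}

int main(void)
{
        /* Hypothetical values: 8 TX rings per UP requested, 1 UP in use,
         * 4 XDP rings, and a profile maximum of 8 rings per UP. */
        printf("request fits: %s\n",
               tx_request_fits(8, 1, 4, 8) ? "yes" : "no");
        return 0;
}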
@@ -153,7 +153,7 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 	int i;
 
 	params->udp_rss = udp_rss;
-	params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
+	params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
 		MLX4_EN_MIN_TX_RING_P_UP :
 		min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
@@ -170,8 +170,8 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
 		params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
-		params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up;
-		params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
+		params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
+		params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
 			params->prof[i].num_up;
 		params->prof[i].rss_rings = 0;
 		params->prof[i].inline_thold = inline_thold;
......
@@ -3305,7 +3305,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 				       MLX4_WQE_CTRL_SOLICITED);
-	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
+	priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
 	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
......
@@ -254,8 +254,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
 					 DEF_RX_RINGS));
 		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
-			min_t(int, num_of_eqs,
-			      netif_get_num_default_rss_queues());
+			min_t(int, num_of_eqs, num_online_cpus());
 		mdev->profile.prof[i].rx_ring_num =
 			rounddown_pow_of_two(num_rx_rings);
 	}
......
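The hunk above is patch 3: the default RX ring count is no longer clamped by netif_get_num_default_rss_queues() (which, at the time of this series, capped the value at 8), only by the number of EQs and the online CPUs, rounded down to a power of two. A userspace sketch under those assumptions, with a hypothetical EQ count:

#include <stdio.h>
#include <unistd.h>

/* Userspace stand-in for the kernel's rounddown_pow_of_two(). */
static unsigned int rounddown_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        /* Hypothetical number of EQs available on the port. */
        unsigned int num_of_eqs = 64;
        unsigned int cpus = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);

        /* Old default: additionally capped at 8 by
         * netif_get_num_default_rss_queues() (assumed value). */
        unsigned int old_cap = cpus < 8 ? cpus : 8;
        unsigned int old_rings =
                rounddown_pow2(num_of_eqs < old_cap ? num_of_eqs : old_cap);

        /* New default: capped only by EQs and online CPUs. */
        unsigned int new_rings =
                rounddown_pow2(num_of_eqs < cpus ? num_of_eqs : cpus);

        printf("default RX rings: old=%u new=%u\n", old_rings, new_rings);
        return 0;
}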
@@ -399,7 +399,7 @@ struct mlx4_en_profile {
 	u32 active_ports;
 	u32 small_pkt_int;
 	u8 no_reset;
-	u8 num_tx_rings_p_up;
+	u8 max_num_tx_rings_p_up;
 	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
 };
......