Commit 60c7f5ae authored by Eric Dumazet, committed by David S. Miller

mlx4: removal of frag_sizes[]

We will soon use order-0 pages, and frag truesize will more precisely
match real sizes.

In the new model, we prefer to use <= 2048 bytes fragments, so that
we can use page-recycle technique on PAGE_SIZE=4096 arches.

We will still pack as many frames as possible on arches with big
pages, like PowerPC.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent acd7628d
...@@ -1181,13 +1181,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) ...@@ -1181,13 +1181,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
return done; return done;
} }
static const int frag_sizes[] = {
FRAG_SZ0,
FRAG_SZ1,
FRAG_SZ2,
FRAG_SZ3
};
void mlx4_en_calc_rx_buf(struct net_device *dev) void mlx4_en_calc_rx_buf(struct net_device *dev)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
...@@ -1211,13 +1204,16 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) ...@@ -1211,13 +1204,16 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
int buf_size = 0; int buf_size = 0;
while (buf_size < eff_mtu) { while (buf_size < eff_mtu) {
priv->frag_info[i].frag_size = int frag_size = eff_mtu - buf_size;
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size; if (i < MLX4_EN_MAX_RX_FRAGS - 1)
priv->frag_info[i].frag_stride = frag_size = min(frag_size, 2048);
ALIGN(priv->frag_info[i].frag_size,
SMP_CACHE_BYTES); priv->frag_info[i].frag_size = frag_size;
buf_size += priv->frag_info[i].frag_size;
priv->frag_info[i].frag_stride = ALIGN(frag_size,
SMP_CACHE_BYTES);
buf_size += frag_size;
i++; i++;
} }
priv->rx_page_order = MLX4_EN_ALLOC_PREFER_ORDER; priv->rx_page_order = MLX4_EN_ALLOC_PREFER_ORDER;
......
...@@ -105,14 +105,6 @@ ...@@ -105,14 +105,6 @@
#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \ #define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \
PAGE_ALLOC_COSTLY_ORDER) PAGE_ALLOC_COSTLY_ORDER)
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
* and 4K allocations) */
enum {
FRAG_SZ0 = 1536 - NET_IP_ALIGN,
FRAG_SZ1 = 4096,
FRAG_SZ2 = 4096,
FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
};
#define MLX4_EN_MAX_RX_FRAGS 4 #define MLX4_EN_MAX_RX_FRAGS 4
/* Maximum ring sizes */ /* Maximum ring sizes */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment