Commit c5adb96f authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: Use napi_alloc_skb for RX SKB allocations

Instead of netdev_alloc_skb, use the napi_alloc_skb function, which is
designated for allocating skbuffs for RX in a channel-specific NAPI
instance and already implies the IP packet alignment, so the driver no
longer needs to reserve that headroom itself.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bc77b240
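
As an illustration of the API change described above, here is a minimal sketch, not mlx5e code: the function names and the `len` parameter are hypothetical placeholders. The old pattern allocates via the generic netdev helper and reserves the IP alignment headroom by hand; napi_alloc_skb(), called from the channel's NAPI context, reserves that headroom itself.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch only, not driver code. */

/* Old pattern: generic allocation plus a manual skb_reserve() so the
 * IP header ends up 4-byte aligned after the 14-byte Ethernet header.
 */
static struct sk_buff *rx_alloc_old(struct net_device *netdev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

/* New pattern: allocate from the per-channel NAPI instance;
 * napi_alloc_skb() already reserves NET_SKB_PAD + NET_IP_ALIGN headroom,
 * so no explicit skb_reserve() is needed.
 */
static struct sk_buff *rx_alloc_new(struct napi_struct *napi, unsigned int len)
{
	return napi_alloc_skb(napi, len);
}
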
@@ -93,7 +93,6 @@
 #define MLX5E_SQ_BF_BUDGET             16
 #define MLX5E_NUM_MAIN_GROUPS 9
-#define MLX5E_NET_IP_ALIGN 2
 static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 {
@@ -373,8 +373,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 		rq->wqe_sz = (priv->params.lro_en) ?
 			priv->params.lro_wqe_sz :
 			MLX5E_SW2HW_MTU(priv->netdev->mtu);
-		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
-		byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
+		byte_count = rq->wqe_sz;
 		byte_count |= MLX5_HW_START_PADDING;
 	}
@@ -47,7 +47,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 	struct sk_buff *skb;
 	dma_addr_t dma_addr;

-	skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+	skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
 	if (unlikely(!skb))
 		return -ENOMEM;
@@ -61,10 +61,8 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
 		goto err_free_skb;

-	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
 	*((dma_addr_t *)skb->cb) = dma_addr;
-	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+	wqe->data.addr = cpu_to_be64(dma_addr);
 	wqe->data.lkey = rq->mkey_be;

 	rq->skb[ix] = skb;
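
The dropped "+ MLX5E_NET_IP_ALIGN" offset follows from the same property: napi_alloc_skb() has already placed skb->data past the alignment headroom, so the buffer can be mapped and handed to hardware starting at skb->data with no extra offset. Below is a minimal sketch of that mapping step under this assumption; rx_map_skb and its parameters are illustrative, not driver code.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative sketch only, not driver code: map the RX buffer for the
 * device starting at skb->data; the returned bus address is what gets
 * programmed into the RX descriptor, with no alignment offset added.
 */
static dma_addr_t rx_map_skb(struct device *dev, struct sk_buff *skb,
			     unsigned int len)
{
	return dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
}
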
@@ -701,9 +699,9 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 		goto mpwrq_cqe_out;
 	}

-	skb = netdev_alloc_skb(rq->netdev,
-			       ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
-				     sizeof(long)));
+	skb = napi_alloc_skb(rq->cq.napi,
+			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+				   sizeof(long)));
 	if (unlikely(!skb))
 		goto mpwrq_cqe_out;