Commit e6a76758 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

net/mlx4_en: call gro handler for encapsulated frames

In order to use the native GRO handling of encapsulated protocols on
mlx4, we need to call napi_gro_receive() instead of netif_receive_skb()
unless busy polling is in action.

While we are at it, rename mlx4_en_cq_ll_polling() to
mlx4_en_cq_busy_polling()

Tested with GRE tunnel: GRO aggregation is now performed on the
ethernet device instead of being done later on the gre device.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Amir Vadai <amirv@mellanox.com>
Cc: Jerry Chu <hkchu@google.com>
Cc: Or Gerlitz <ogerlitz@mellanox.com>
Acked-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d10dbad2
...@@ -724,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -724,7 +724,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
* - not an IP fragment * - not an IP fragment
* - no LLS polling in progress * - no LLS polling in progress
*/ */
if (!mlx4_en_cq_ll_polling(cq) && if (!mlx4_en_cq_busy_polling(cq) &&
(dev->features & NETIF_F_GRO)) { (dev->features & NETIF_F_GRO)) {
struct sk_buff *gro_skb = napi_get_frags(&cq->napi); struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
if (!gro_skb) if (!gro_skb)
...@@ -816,7 +816,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -816,7 +816,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_mark_napi_id(skb, &cq->napi); skb_mark_napi_id(skb, &cq->napi);
/* Push it up the stack */ if (!mlx4_en_cq_busy_polling(cq))
napi_gro_receive(&cq->napi, skb);
else
netif_receive_skb(skb); netif_receive_skb(skb);
next: next:
......
...@@ -661,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) ...@@ -661,7 +661,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
} }
/* true if a socket is polling, even if it did not get the lock */ /* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{ {
WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
return cq->state & CQ_USER_PEND; return cq->state & CQ_USER_PEND;
...@@ -691,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) ...@@ -691,7 +691,7 @@ static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
return false; return false;
} }
static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{ {
return false; return false;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.