Commit acea73d6 authored by Eric Dumazet, committed by David S. Miller

net/mlx4_en: Enable the compiler to make is_inline() inlined

Reorganize code to call is_inline() once, so compiler can inline it
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e70602a8
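
Before the diff itself, a minimal standalone sketch of the idea behind the change (this is not the driver code; names such as pkt, can_inline, build_descriptor and thold are hypothetical): the old code called the inlining predicate from several places with different arguments, while the reorganized code evaluates it once into a local bool, giving the compiler a single call site it can inline and a result that can simply be reused.

/* Illustrative sketch only -- not the mlx4 driver code; all names here
 * (pkt, can_inline, build_descriptor, thold) are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct pkt {
	size_t len;
	int nr_frags;
};

/* Small static predicate: with a single caller, the compiler can inline it. */
static bool can_inline(const struct pkt *p, size_t thold)
{
	if (!thold || p->len > thold)
		return false;
	return p->nr_frags <= 1;
}

void build_descriptor(const struct pkt *p, size_t thold)
{
	/* Old pattern: call the predicate at every use site, e.g.
	 *   if (can_inline(p, thold)) ...;  ...  if (!can_inline(p, thold)) ...;
	 * New pattern: evaluate once, keep the answer in a local bool and
	 * reuse it, mirroring what the commit does with inline_ok / tx_info->inl.
	 */
	bool inline_ok = can_inline(p, thold);

	if (inline_ok) {
		/* copy the payload directly into the descriptor */
	} else {
		/* set up DMA mappings for the linear part and fragments */
	}
}
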
@@ -531,29 +531,32 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 	return ring->buf + index * TXBB_SIZE;
 }
 
+/* Decide if skb can be inlined in tx descriptor to avoid dma mapping
+ *
+ * It seems strange we do not simply use skb_copy_bits().
+ * This would allow to inline all skbs iff skb->len <= inline_thold
+ *
+ * Note that caller already checked skb was not a gso packet
+ */
 static bool is_inline(int inline_thold, const struct sk_buff *skb,
 		      const struct skb_shared_info *shinfo,
 		      void **pfrag)
 {
 	void *ptr;
 
-	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
-		if (shinfo->nr_frags == 1) {
-			ptr = skb_frag_address_safe(&shinfo->frags[0]);
-			if (unlikely(!ptr))
-				return 0;
-
-			if (pfrag)
-				*pfrag = ptr;
-			return 1;
-		} else if (unlikely(shinfo->nr_frags))
-			return 0;
-		else
-			return 1;
+	if (skb->len > inline_thold || !inline_thold)
+		return false;
+
+	if (shinfo->nr_frags == 1) {
+		ptr = skb_frag_address_safe(&shinfo->frags[0]);
+		if (unlikely(!ptr))
+			return false;
+		*pfrag = ptr;
+		return true;
 	}
-
-	return 0;
+	if (shinfo->nr_frags)
+		return false;
+	return true;
 }
 
 static int inline_size(const struct sk_buff *skb)
@@ -570,12 +573,15 @@ static int inline_size(const struct sk_buff *skb)
 static int get_real_size(const struct sk_buff *skb,
 			 const struct skb_shared_info *shinfo,
 			 struct net_device *dev,
-			 int *lso_header_size)
+			 int *lso_header_size,
+			 bool *inline_ok,
+			 void **pfrag)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int real_size;
 
 	if (shinfo->gso_size) {
+		*inline_ok = false;
 		if (skb->encapsulation)
 			*lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
 		else
@@ -595,10 +601,14 @@ static int get_real_size(const struct sk_buff *skb,
 		}
 	} else {
 		*lso_header_size = 0;
-		if (!is_inline(priv->prof->inline_thold, skb, shinfo, NULL))
-			real_size = CTRL_SIZE + (shinfo->nr_frags + 1) * DS_SIZE;
-		else
+		*inline_ok = is_inline(priv->prof->inline_thold, skb,
+				       shinfo, pfrag);
+
+		if (*inline_ok)
 			real_size = inline_size(skb);
+		else
+			real_size = CTRL_SIZE +
+				    (shinfo->nr_frags + 1) * DS_SIZE;
 	}
 
 	return real_size;
@@ -694,9 +704,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	u16 vlan_tag = 0;
 	int i_frag;
 	int lso_header_size;
-	void *fragptr;
+	void *fragptr = NULL;
 	bool bounce = false;
 	bool send_doorbell;
+	bool inline_ok;
 	u32 ring_cons;
 
 	if (!priv->port_up)
@@ -708,7 +719,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* fetch ring->cons far ahead before needing it to avoid stall */
 	ring_cons = ACCESS_ONCE(ring->cons);
 
-	real_size = get_real_size(skb, shinfo, dev, &lso_header_size);
+	real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
+				  &inline_ok, &fragptr);
 	if (unlikely(!real_size))
 		goto tx_drop;
 
@@ -781,15 +793,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* valid only for none inline segments */
 	tx_info->data_offset = (void *)data - (void *)tx_desc;
 
+	tx_info->inl = inline_ok;
+
 	tx_info->linear = (lso_header_size < skb_headlen(skb) &&
-			   !is_inline(ring->inline_thold, skb, shinfo, NULL)) ? 1 : 0;
+			   !inline_ok) ? 1 : 0;
 
 	tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
 	data += tx_info->nr_maps - 1;
 
-	if (is_inline(ring->inline_thold, skb, shinfo, &fragptr)) {
-		tx_info->inl = 1;
-	} else {
+	if (!tx_info->inl) {
 		dma_addr_t dma = 0;
 		u32 byte_count = 0;
@@ -827,7 +839,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			wmb();
 			data->byte_count = cpu_to_be32(byte_count);
 		}
-		tx_info->inl = 0;
 		/* tx completion can avoid cache line miss for common cases */
 		tx_info->map0_dma = dma;
 		tx_info->map0_byte_count = byte_count;
@@ -899,11 +910,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
-	if (tx_info->inl) {
+	if (tx_info->inl)
 		build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag,
 				 tx_ind, fragptr);
-		tx_info->inl = 1;
-	}
 
 	if (skb->encapsulation) {
 		struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);