Commit 3a69ab87 authored by David S. Miller

Merge branch 'ionic-better-tx-sg-handling'

Shannon Nelson says:

====================
ionic: better Tx SG handling

The primary patch here is to be sure we're not hitting linearize on a Tx
skb when we don't really need to.  The other two are related details.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4fa7011d 529cdfd5
......@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include "ionic_if.h"
#include "ionic_regs.h"
......@@ -216,7 +217,7 @@ struct ionic_desc_info {
};
unsigned int bytes;
unsigned int nbufs;
struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
ionic_desc_cb cb;
void *cb_arg;
};
......
......@@ -3831,6 +3831,18 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
qtype, qti->max_sg_elems);
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride);
if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
qtype, qti->max_sg_elems);
}
if (qti->max_sg_elems > MAX_SKB_FRAGS) {
qti->max_sg_elems = MAX_SKB_FRAGS;
dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
qtype, qti->max_sg_elems);
}
}
}
......
......@@ -1239,25 +1239,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool too_many_frags = false;
skb_frag_t *frag;
int desc_bufs;
int chunk_len;
int frag_rem;
int tso_rem;
int seg_rem;
bool encap;
int hdrlen;
int ndescs;
int err;
/* Each desc is mss long max, so a descriptor for each gso_seg */
if (skb_is_gso(skb))
if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
else
} else {
ndescs = 1;
if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
too_many_frags = true;
goto linearize;
}
}
/* If non-TSO, just need 1 desc and nr_frags sg elems */
if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
/* If non-TSO, or no frags to check, we're done */
if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
/* Too many frags, so linearize */
err = skb_linearize(skb);
if (err)
return err;
/* We need to scan the skb to be sure that none of the MTU sized
* packets in the TSO will require more sgs per descriptor than we
* can support. We loop through the frags, add up the lengths for
* a packet, and count the number of sgs used per packet.
*/
tso_rem = skb->len;
frag = skb_shinfo(skb)->frags;
encap = skb->encapsulation;
/* start with just hdr in first part of first descriptor */
if (encap)
hdrlen = skb_inner_tcp_all_headers(skb);
else
hdrlen = skb_tcp_all_headers(skb);
seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
frag_rem = hdrlen;
while (tso_rem > 0) {
desc_bufs = 0;
while (seg_rem > 0) {
desc_bufs++;
/* We add the +1 because we can take buffers for one
* more than we have SGs: one for the initial desc data
* in addition to the SG segments that might follow.
*/
if (desc_bufs > q->max_sg_elems + 1) {
too_many_frags = true;
goto linearize;
}
if (frag_rem == 0) {
frag_rem = skb_frag_size(frag);
frag++;
}
chunk_len = min(frag_rem, seg_rem);
frag_rem -= chunk_len;
tso_rem -= chunk_len;
seg_rem -= chunk_len;
}
seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
}
stats->linearize++;
linearize:
if (too_many_frags) {
err = skb_linearize(skb);
if (err)
return err;
stats->linearize++;
}
return ndescs;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment