Commit eb6c503d authored by Casey Leedom, committed by David S. Miller

cxgb4vf: fix some errors in Gather List to skb conversion

There were some errors in the way that internal Gather Lists were being
translated into skb's.  This also makes the VF Driver look more like the PF
Driver to facilitate easier comparison.
Signed-off-by: Casey Leedom <leedom@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b94e72e2
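
The fix centers on building an skb from a hardware Gather List consistently, keyed off the list's total length. What follows is a minimal user-space sketch of that copy-versus-reference policy, offered for orientation only; gather_list, pkt, gl_to_pkt, and COPY_THRES are hypothetical stand-ins, not the driver's types, and the real t4vf_pktgl_to_skb() in the diff below must additionally maintain skb accounting (len, data_len, truesize) and take a page reference on the last fragment.

#include <stdlib.h>
#include <string.h>

#define COPY_THRES 256                  /* mirrors RX_COPY_THRES */

struct frag { unsigned char *buf; size_t len; };

struct gather_list {
	struct frag *frags;             /* fragment array */
	unsigned int nfrags;
	size_t tot_len;                 /* total bytes across all fragments */
	unsigned char *va;              /* address of the first byte */
};

struct pkt {
	unsigned char *head;            /* owned, linear copy */
	size_t head_len;
	const struct frag *frags;       /* referenced, not copied */
	unsigned int nfrags;
	size_t frag0_off;               /* bytes of frags[0] already in head */
};

static struct pkt *gl_to_pkt(const struct gather_list *gl, size_t pull_len)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	if (gl->tot_len <= COPY_THRES) {
		/* Small packet: copy the whole thing into one linear buffer,
		 * using the total gather-list length for both the allocation
		 * and the copy. */
		p->head = malloc(gl->tot_len);
		if (!p->head) {
			free(p);
			return NULL;
		}
		memcpy(p->head, gl->va, gl->tot_len);
		p->head_len = gl->tot_len;
	} else {
		/* Large packet: copy only pull_len header bytes and keep the
		 * rest as references into the original fragments. */
		p->head = malloc(pull_len);
		if (!p->head) {
			free(p);
			return NULL;
		}
		memcpy(p->head, gl->va, pull_len);
		p->head_len = pull_len;
		p->frags = gl->frags;
		p->nfrags = gl->nfrags;
		p->frag0_off = pull_len;
	}
	return p;
}

The policy mirrors the fix in the diff: both the allocation and the copy in the small-packet path are driven by the gather list's total length, whereas the pre-patch handler compared the CPL packet length against RX_COPY_THRES but then allocated and copied gl->frags[0].size bytes.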
@@ -154,13 +154,14 @@ enum {
 	 */
 	RX_COPY_THRES = 256,
 	RX_PULL_LEN = 128,
-};
 
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};
 
 /*
  * Software state per TX descriptor.
@@ -1354,6 +1355,67 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+/**
+ *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+				  unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *ssi;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+		ssi->frags[0].size = gl->frags[0].size - pull_len;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+out:
+	return skb;
+}
+
 /**
  *	t4vf_pktgl_free - free a packet gather list
  *	@gl: the gather list
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
 	struct sk_buff *skb;
 	struct port_info *pi;
-	struct skb_shared_info *ssi;
 	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	unsigned int len = be16_to_cpu(pkt->len);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
 	/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	}
 
 	/*
-	 * If the ingress packet is small enough, allocate an skb large enough
-	 * for all of the data and copy it inline.  Otherwise, allocate an skb
-	 * with enough room to pull in the header and reference the rest of
-	 * the data via the skb fragment list.
+	 * Convert the Packet Gather List into an skb.
 	 */
-	if (len <= RX_COPY_THRES) {
-		/* small packets have only one fragment */
-		skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, gl->frags[0].size);
-		skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-	} else {
-		skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, RX_PKT_PULL_LEN);
-		skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-					     RX_PKT_PULL_LEN);
-		ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-
-		skb->len = len + PKTSHIFT;
-		skb->data_len = skb->len - RX_PKT_PULL_LEN;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
+	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return 0;
 	}
 	__skb_pull(skb, PKTSHIFT);
 
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	netif_receive_skb(skb);
 	return 0;
 
-nomem:
-	t4vf_pktgl_free(gl);
-	rxq->stats.rx_drops++;
-	return 0;
 }
 
 /**
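
A closing note on the constants this patch introduces: RX_SKB_LEN (512) deliberately exceeds RX_PULL_LEN (128), as the new enum comment says, to give pskb_may_pull() some room. Only pull_len bytes are copied into the skb's main body, but the body is allocated at skb_len, so a later consumer can linearize additional header bytes in place rather than forcing a reallocation. A minimal, hypothetical illustration (example_parse_headers is not driver code):

#include <linux/skbuff.h>

/* Hypothetical consumer: ensure hdr_len contiguous bytes at skb->data.
 * Because the driver allocated RX_SKB_LEN bytes of linear space but
 * copied in only RX_PULL_LEN, pskb_may_pull() can usually satisfy the
 * request by copying from the fragments into the existing head, with
 * no reallocation of the skb's linear buffer. */
static bool example_parse_headers(struct sk_buff *skb, unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return false;	/* packet is shorter than hdr_len */
	/* skb->data now points at hdr_len linear bytes */
	return true;
}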