Commit b54ad0ea authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

nfp: correct descriptor offsets in presence of metadata

The TSO-related offsets in the descriptor should not include
the length of the prepended metadata.  Adjust them.  Note that
this could not have caused issues in the past as we don't
support TSO with metadata prepend as of this patch.
Signed-off-by: Michael Rapson <michael.rapson@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8b5ddf1e
...@@ -647,27 +647,29 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, ...@@ -647,27 +647,29 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
* @txbuf: Pointer to driver soft TX descriptor * @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to HW TX descriptor * @txd: Pointer to HW TX descriptor
* @skb: Pointer to SKB * @skb: Pointer to SKB
* @md_bytes: Prepend length
* *
* Set up Tx descriptor for LSO, do nothing for non-LSO skbs. * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
* Return error on packet header greater than maximum supported LSO header size. * Return error on packet header greater than maximum supported LSO header size.
*/ */
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf, struct nfp_net_tx_buf *txbuf,
struct nfp_net_tx_desc *txd, struct sk_buff *skb) struct nfp_net_tx_desc *txd, struct sk_buff *skb,
u32 md_bytes)
{ {
u32 hdrlen; u32 l3_offset, l4_offset, hdrlen;
u16 mss; u16 mss;
if (!skb_is_gso(skb)) if (!skb_is_gso(skb))
return; return;
if (!skb->encapsulation) { if (!skb->encapsulation) {
txd->l3_offset = skb_network_offset(skb); l3_offset = skb_network_offset(skb);
txd->l4_offset = skb_transport_offset(skb); l4_offset = skb_transport_offset(skb);
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else { } else {
txd->l3_offset = skb_inner_network_offset(skb); l3_offset = skb_inner_network_offset(skb);
txd->l4_offset = skb_inner_transport_offset(skb); l4_offset = skb_inner_transport_offset(skb);
hdrlen = skb_inner_transport_header(skb) - skb->data + hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb); inner_tcp_hdrlen(skb);
} }
...@@ -676,7 +678,9 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, ...@@ -676,7 +678,9 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1); txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK; mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
txd->lso_hdrlen = hdrlen; txd->l3_offset = l3_offset - md_bytes;
txd->l4_offset = l4_offset - md_bytes;
txd->lso_hdrlen = hdrlen - md_bytes;
txd->mss = cpu_to_le16(mss); txd->mss = cpu_to_le16(mss);
txd->flags |= PCIE_DESC_TX_LSO; txd->flags |= PCIE_DESC_TX_LSO;
...@@ -851,7 +855,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) ...@@ -851,7 +855,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->lso_hdrlen = 0; txd->lso_hdrlen = 0;
/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */ /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
nfp_net_tx_tso(r_vec, txbuf, txd, skb); nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN; txd->flags |= PCIE_DESC_TX_VLAN;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment