Commit a4d7e485 authored by Eric Dumazet, committed by David S. Miller

vmxnet3: must split too big fragments

vmxnet3 has a 16 Kbyte limit per tx descriptor, which happened to work
as long as we provided PAGE_SIZE fragments.

Our stack can now build larger fragments, so we need to split them at
the 16 Kbyte boundary.
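
The splitting walks each fragment in at-most-16-Kbyte steps, as the hunks
below show. As a minimal standalone sketch of the idea (userspace C with
illustrative names; MAX_TX_BUF_SIZE stands in for the driver's
VMXNET3_MAX_TX_BUF_SIZE, and split_fragment() is not driver code):

#include <stdio.h>

/* Stand-in for VMXNET3_MAX_TX_BUF_SIZE: 16 Kbytes per tx descriptor. */
#define MAX_TX_BUF_SIZE (1u << 14)

/* Emit one (offset, size) pair per tx descriptor needed for a fragment. */
static void split_fragment(unsigned int len)
{
        unsigned int buf_offset = 0;

        while (len) {
                /* each descriptor covers at most 16 Kbytes */
                unsigned int buf_size =
                        len < MAX_TX_BUF_SIZE ? len : MAX_TX_BUF_SIZE;

                printf("descriptor: offset=%u size=%u\n",
                       buf_offset, buf_size);
                len -= buf_size;
                buf_offset += buf_size;
        }
}

int main(void)
{
        split_fragment(32868);  /* 32 Kbytes + 100 bytes */
        return 0;
}

Running it prints three (offset, size) pairs: (0, 16384), (16384, 16384),
(32768, 100) -- one tx descriptor each.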
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: jongman heo <jongman.heo@samsung.com>
Tested-by: jongman heo <jongman.heo@samsung.com>
Cc: Shreyas Bhatewara <sbhatewara@vmware.com>
Reviewed-by: Bhavesh Davda <bhavesh@vmware.com>
Signed-off-by: Shreyas Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 78933636
@@ -744,20 +744,31 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		u32 buf_size;
 
-		tbi = tq->buf_info + tq->tx_ring.next2fill;
-		tbi->map_type = VMXNET3_MAP_PAGE;
-		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
-						 0, skb_frag_size(frag),
-						 DMA_TO_DEVICE);
+		buf_offset = 0;
+		len = skb_frag_size(frag);
+		while (len) {
+			tbi = tq->buf_info + tq->tx_ring.next2fill;
+			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+				buf_size = len;
+				dw2 |= len;
+			} else {
+				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+				/* spec says that for TxDesc.len, 0 == 2^14 */
+			}
+			tbi->map_type = VMXNET3_MAP_PAGE;
+			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+							 buf_offset, buf_size,
+							 DMA_TO_DEVICE);
 
-		tbi->len = skb_frag_size(frag);
+			tbi->len = buf_size;
 
-		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
-		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
-		gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
-		gdesc->dword[3] = 0;
+			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+			gdesc->dword[2] = cpu_to_le32(dw2);
+			gdesc->dword[3] = 0;
 
 		dev_dbg(&adapter->netdev->dev,
@@ -766,6 +777,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
 		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+			len -= buf_size;
+			buf_offset += buf_size;
+		}
 	}
 
 	ctx->eop_txd = gdesc;
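
A note on the length encoding above: TxDesc.len cannot hold the value 16384
directly (VMXNET3_MAX_TX_BUF_SIZE is 1 << 14), so, per the spec comment
carried in the code, a stored 0 is read back as 2^14. That is why the else
branch leaves the length bits of dw2 at zero for a full 16384-byte buffer,
while the if branch ORs in the literal length for a shorter tail (e.g. a
100-byte remainder stores 100).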
@@ -886,6 +901,18 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
 	}
 }
 
+static int txd_estimate(const struct sk_buff *skb)
+{
+	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
+	}
+	return count;
+}
 
 /*
  * Transmits a pkt thru a given tq
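
txd_estimate() charges each fragment the number of 16-Kbyte descriptors it
will actually consume, rather than assuming one per fragment. A sketch of
the arithmetic, assuming VMXNET3_TXD_NEEDED is a ceiling division by
VMXNET3_MAX_TX_BUF_SIZE (the exact macro lives in the driver's headers and
is paraphrased here):

/* Assumed shape of the macro: ceiling division by the 16K descriptor cap. */
#define MAX_TX_BUF_SIZE  (1u << 14)
#define TXD_NEEDED(size) (((size) + MAX_TX_BUF_SIZE - 1) / MAX_TX_BUF_SIZE)

/* Worked values:
 *   TXD_NEEDED(1500)  == 1   typical MTU-sized fragment
 *   TXD_NEEDED(16384) == 1   exactly one full descriptor
 *   TXD_NEEDED(32768) == 2   a 32 Kbyte fragment is split in two
 */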
@@ -914,9 +941,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	union Vmxnet3_GenericDesc tempTxDesc;
 #endif
 
-	/* conservatively estimate # of descriptors to use */
-	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
-		skb_shinfo(skb)->nr_frags + 1;
+	count = txd_estimate(skb);
 
 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
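
The old estimate charged each page fragment exactly one descriptor; with the
splitting above, a fragment larger than 16 Kbytes consumes several, so the
ring-space check could pass and then run out of descriptors mid-packet. For
example, an skb with a 1 Kbyte linear area and one 32 Kbyte fragment was
estimated at 1 + 1 + 1 = 3 descriptors but actually needs 1 + 2 + 1 = 4;
txd_estimate() applies the same per-fragment ceiling used at mapping time
and returns the correct count.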