Commit 6f2b6a30 authored by Neil Horman, committed by David S. Miller

3c59x: Add dma error checking and recovery

The 3c59x driver performs no checks for failed DMA mappings on transmit, and has
no way to unmap the fragments already mapped when a single mapping fails in the
middle of a fragmented transmit. This patch adds error checking to verify that
DMA mappings succeed, and unwinds the skb's mappings when a fragmented
transmission hits a mapping failure, so that no mappings are leaked.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Linux Kernel list <linux-kernel@vger.kernel.org>
CC: "David S. Miller" <davem@davemloft.net>
CC: Meelis Roos <mroos@linux.ee>
Tested-by: Meelis Roos <mroos@linux.ee>
parent f6f2332d
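
The fix follows the standard DMA-API pattern: check every mapping with
dma_mapping_error(), and on failure unmap everything mapped so far before giving
up on the packet. Below is a minimal sketch of that pattern, written against the
generic dma_* API rather than the driver's pci_* wrappers; the function name
map_skb_for_tx and the addrs array are hypothetical, for illustration only. The
driver itself does the same unwinding directly against the addresses and lengths
it has already recorded in its tx_ring descriptors.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Illustrative only: map an skb's head and fragments for transmit,
     * unwinding every mapping already made if a later one fails. */
    static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *addrs)
    {
            int i;

            /* Map the linear part first; nothing to undo if this fails. */
            addrs[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
                                      DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addrs[0]))
                    return -ENOMEM;

            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    addrs[i + 1] = skb_frag_dma_map(dev, frag, 0,
                                                    skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                    if (dma_mapping_error(dev, addrs[i + 1]))
                            goto unwind;
            }
            return 0;

    unwind:
            /* Fragment i failed to map; unmap fragments i-1..0, then the
             * linear part, so no DMA mapping is leaked. */
            while (--i >= 0)
                    dma_unmap_page(dev, addrs[i + 1],
                                   skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                   DMA_TO_DEVICE);
            dma_unmap_single(dev, addrs[0], skb_headlen(skb), DMA_TO_DEVICE);
            return -ENOMEM;
    }
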
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry = vp->cur_tx % TX_RING_SIZE;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
 	unsigned long flags;
+	dma_addr_t dma_addr;
 
 	if (vortex_debug > 6) {
 		pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-										skb->len, PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+					  PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
 	} else {
 		int i;
 
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-										skb_headlen(skb), PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+					  skb_headlen(skb), PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+						    frag->page_offset,
+						    frag->size,
+						    DMA_TO_DEVICE);
+			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+				for(i = i-1; i >= 0; i--)
+					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+						       DMA_TO_DEVICE);
+
+				pci_unmap_single(VORTEX_PCI(vp),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+						 PCI_DMA_TODEVICE);
+
+				goto out_dma_err;
+			}
+
 			vp->tx_ring[entry].frag[i+1].addr =
-					cpu_to_le32(skb_frag_dma_map(
-						&VORTEX_PCI(vp)->dev,
-						frag,
-						frag->page_offset, frag->size, DMA_TO_DEVICE));
+					cpu_to_le32(dma_addr);
 
 			if (i == skb_shinfo(skb)->nr_frags-1)
 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		goto out_dma_err;
+	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_tx_timestamp(skb);
 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	spin_unlock_irqrestore(&vp->lock, flags);
+out:
 	return NETDEV_TX_OK;
+out_dma_err:
+	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up