Commit 37c85c34 authored by Dan Carpenter, committed by David S. Miller

net: sxgbe: fix error handling in init_rx_ring()

There are a couple of bugs in the error handling in this function.

1) If we can't allocate "rx_ring->rx_skbuff" then we should call
   dma_free_coherent() but we don't.
2) free_rx_ring() frees "rx_ring->rx_skbuff_dma" and "rx_ring->rx_skbuff"
   so calling it in a loop causes a double free.

Also it was a bit confusing how we sometimes freed things before doing
the goto.  I've cleaned it up so it does error handling in normal kernel
style.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e8a308af
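
For context on the "normal kernel style" the commit message refers to: each failure point jumps to a label that unwinds only what has already been allocated, and the labels are listed in reverse order of allocation so the function falls through them. Below is a minimal, self-contained sketch of that pattern. It uses userspace malloc()/free() so it compiles on its own, and every name in it (struct demo_ctx, demo_init, and so on) is invented for illustration; none of it is part of the sxgbe driver or any kernel API.

/* Sketch of goto-based error unwinding in the kernel style.
 * All names here are made up for illustration; userspace
 * malloc()/free() stand in for the driver's real allocators.
 */
#include <stdlib.h>

struct demo_ctx {
	int *ring;	/* plays the role of rx_ring->dma_rx */
	int *ring_dma;	/* plays the role of rx_ring->rx_skbuff_dma */
	int *ring_skb;	/* plays the role of rx_ring->rx_skbuff */
};

static int demo_init(struct demo_ctx *ctx, size_t n)
{
	int ret;

	ctx->ring = malloc(n * sizeof(*ctx->ring));
	if (!ctx->ring)
		return -1;		/* nothing allocated yet: plain return */

	ctx->ring_dma = malloc(n * sizeof(*ctx->ring_dma));
	if (!ctx->ring_dma) {
		ret = -1;
		goto err_free_ring;	/* unwind only what already succeeded */
	}

	ctx->ring_skb = malloc(n * sizeof(*ctx->ring_skb));
	if (!ctx->ring_skb) {
		ret = -1;
		goto err_free_ring_dma;	/* later failure, later label */
	}

	return 0;

	/* Labels run in reverse order of allocation, so each resource
	 * is freed exactly once and never before it exists.
	 */
err_free_ring_dma:
	free(ctx->ring_dma);
err_free_ring:
	free(ctx->ring);
	return ret;
}

The diff below has the same shape: failures before anything is allocated simply return -ENOMEM, later failures jump to err_free_dma_rx or err_free_skbuff_dma, and err_free_rx_buffers first walks back over the descriptors that sxgbe_init_rx_buffers() already set up before falling through to the remaining labels.
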
@@ -365,6 +365,26 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
 	return 0;
 }
 
+/**
+ * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
+ * @dev: net device structure
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function initializes the DMA RX descriptor
+ */
+static void sxgbe_free_rx_buffers(struct net_device *dev,
+				  struct sxgbe_rx_norm_desc *p, int i,
+				  unsigned int dma_buf_sz,
+				  struct sxgbe_rx_queue *rx_ring)
+{
+	struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+	kfree_skb(rx_ring->rx_skbuff[i]);
+	dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i],
+			 dma_buf_sz, DMA_FROM_DEVICE);
+}
+
 /**
  * init_tx_ring - init the TX descriptor ring
  * @dev: net device structure
@@ -457,7 +477,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 	/* RX ring is not allcoated */
 	if (rx_ring == NULL) {
 		netdev_err(dev, "No memory for RX queue\n");
-		goto error;
+		return -ENOMEM;
 	}
 
 	/* assign queue number */
@@ -469,23 +489,21 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 					      &rx_ring->dma_rx_phy, GFP_KERNEL);
 
 	if (rx_ring->dma_rx == NULL)
-		goto error;
+		return -ENOMEM;
 
 	/* allocate memory for RX skbuff array */
 	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
 					       sizeof(dma_addr_t), GFP_KERNEL);
 	if (!rx_ring->rx_skbuff_dma) {
-		dma_free_coherent(priv->device,
-				  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
-				  rx_ring->dma_rx, rx_ring->dma_rx_phy);
-		goto error;
+		ret = -ENOMEM;
+		goto err_free_dma_rx;
 	}
 
 	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
 					   sizeof(struct sk_buff *), GFP_KERNEL);
 	if (!rx_ring->rx_skbuff) {
-		kfree(rx_ring->rx_skbuff_dma);
-		goto error;
+		ret = -ENOMEM;
+		goto err_free_skbuff_dma;
 	}
 
 	/* initialise the buffers */
@@ -495,7 +513,7 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
 					    bfsize, rx_ring);
 		if (ret)
-			goto err_init_rx_buffers;
+			goto err_free_rx_buffers;
 	}
 
 	/* initalise counters */
@@ -505,11 +523,22 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 
 	return 0;
 
-err_init_rx_buffers:
-	while (--desc_index >= 0)
-		free_rx_ring(priv->device, rx_ring, desc_index);
-error:
-	return -ENOMEM;
+err_free_rx_buffers:
+	while (--desc_index >= 0) {
+		struct sxgbe_rx_norm_desc *p;
+
+		p = rx_ring->dma_rx + desc_index;
+		sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring);
+	}
+	kfree(rx_ring->rx_skbuff);
+err_free_skbuff_dma:
+	kfree(rx_ring->rx_skbuff_dma);
+err_free_dma_rx:
+	dma_free_coherent(priv->device,
+			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
+
+	return ret;
 }
 
 /**
  * free_tx_ring - free the TX descriptor ring