Commit 74dd0c42, authored by Michael Chan, committed by David S. Miller

cnic: Add functions to allocate and free UIO rings

These functions are needed to free up memory when the rings are no longer
needed.
Reviewed-by: Eddie Wai <eddie.wai@broadcom.com>
Reviewed-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b6069a95
...@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev) ...@@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev)
} }
} }
static void __cnic_free_uio(struct cnic_uio_dev *udev) static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{ {
uio_unregister_device(&udev->cnic_uinfo);
if (udev->l2_buf) { if (udev->l2_buf) {
dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
udev->l2_buf, udev->l2_buf_map); udev->l2_buf, udev->l2_buf_map);
...@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev) ...@@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev)
udev->l2_ring = NULL; udev->l2_ring = NULL;
} }
}
/*
 * Tear down the UIO device and release everything it owns: the UIO
 * registration, the DMA ring/buffer memory, the PCI device reference,
 * and finally the udev structure itself.
 */
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	/* Unregister from the UIO framework before freeing the memory
	 * that was exposed to userspace through it.
	 */
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	/* Drop the reference taken on the PCI device when udev was set up. */
	pci_dev_put(udev->pdev);
	kfree(udev);
}
...@@ -996,6 +1002,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, ...@@ -996,6 +1002,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
return 0; return 0;
} }
/*
 * Allocate the DMA-coherent L2 ring and RX buffer memory backing the
 * UIO device.  Idempotent: returns 0 immediately if the ring is already
 * allocated.  On buffer allocation failure the ring is freed again via
 * __cnic_free_uio_rings() and -ENOMEM is returned.
 */
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	/* Rings may survive a reset; don't allocate twice. */
	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	/* __GFP_COMP: the area is mmap()ed to userspace by the UIO core. */
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	/* One extra buffer slot beyond the RX ring size, page aligned. */
	udev->l2_buf_size = PAGE_ALIGN((cp->l2_rx_ring_size + 1) *
				       cp->l2_single_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{ {
struct cnic_local *cp = dev->cnic_priv; struct cnic_local *cp = dev->cnic_priv;
...@@ -1020,20 +1054,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) ...@@ -1020,20 +1054,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
udev->dev = dev; udev->dev = dev;
udev->pdev = dev->pcidev; udev->pdev = dev->pcidev;
udev->l2_ring_size = pages * BCM_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_ring)
goto err_udev;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; if (__cnic_alloc_uio_rings(udev, pages))
udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); goto err_udev;
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_buf)
goto err_dma;
write_lock(&cnic_dev_lock); write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list); list_add(&udev->list, &cnic_udev_list);
...@@ -1044,9 +1067,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) ...@@ -1044,9 +1067,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
cp->udev = udev; cp->udev = udev;
return 0; return 0;
err_dma:
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
err_udev: err_udev:
kfree(udev); kfree(udev);
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment