Commit e558b1fb authored by Karicheri, Muralidharan's avatar Karicheri, Muralidharan Committed by David S. Miller

net: netcp: add error check to netcp_allocate_rx_buf()

Currently, if netcp_allocate_rx_buf() fails due to no descriptors
in the rx free descriptor queue, inside the netcp_rxpool_refill() function
the iterative loop to fill buffers doesn't terminate right away. So modify
netcp_allocate_rx_buf() to return an error code and use it to break the
loop when there is an error.
Signed-off-by: default avatarMurali Karicheri <m-karicheri2@ti.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 736532a0
...@@ -801,7 +801,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp) ...@@ -801,7 +801,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
netcp->rx_pool = NULL; netcp->rx_pool = NULL;
} }
static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{ {
struct knav_dma_desc *hwdesc; struct knav_dma_desc *hwdesc;
unsigned int buf_len, dma_sz; unsigned int buf_len, dma_sz;
...@@ -815,7 +815,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) ...@@ -815,7 +815,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
hwdesc = knav_pool_desc_get(netcp->rx_pool); hwdesc = knav_pool_desc_get(netcp->rx_pool);
if (IS_ERR_OR_NULL(hwdesc)) { if (IS_ERR_OR_NULL(hwdesc)) {
dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
return; return -ENOMEM;
} }
if (likely(fdq == 0)) { if (likely(fdq == 0)) {
...@@ -867,25 +867,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) ...@@ -867,25 +867,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
&dma_sz); &dma_sz);
knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
return; return 0;
fail: fail:
knav_pool_desc_put(netcp->rx_pool, hwdesc); knav_pool_desc_put(netcp->rx_pool, hwdesc);
return -ENOMEM;
} }
/* Refill Rx FDQ with descriptors & attached buffers */ /* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp) static void netcp_rxpool_refill(struct netcp_intf *netcp)
{ {
u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
int i; int i, ret = 0;
/* Calculate the FDQ deficit and refill */ /* Calculate the FDQ deficit and refill */
for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
fdq_deficit[i] = netcp->rx_queue_depths[i] - fdq_deficit[i] = netcp->rx_queue_depths[i] -
knav_queue_get_count(netcp->rx_fdq[i]); knav_queue_get_count(netcp->rx_fdq[i]);
while (fdq_deficit[i]--) while (fdq_deficit[i]-- && !ret)
netcp_allocate_rx_buf(netcp, i); ret = netcp_allocate_rx_buf(netcp, i);
} /* end for fdqs */ } /* end for fdqs */
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment