Commit 32626230 authored by Eilon Greenstein, committed by David S. Miller

bnx2x: Memory allocation

Memory allocation
- The CQE ring was allocated to the max size even for a chip that does
  not support it. Fixed to allocate according to the chip type to save
  memory
- The rx_page_ring was not freed on driver unload
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3fcaf2e5
...@@ -4252,7 +4252,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, ...@@ -4252,7 +4252,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
static void bnx2x_init_rx_rings(struct bnx2x *bp) static void bnx2x_init_rx_rings(struct bnx2x *bp)
{ {
int func = BP_FUNC(bp); int func = BP_FUNC(bp);
u16 ring_prod, cqe_ring_prod = 0; int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H;
u16 ring_prod, cqe_ring_prod;
int i, j; int i, j;
bp->rx_buf_use_size = bp->dev->mtu; bp->rx_buf_use_size = bp->dev->mtu;
...@@ -4266,9 +4268,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) ...@@ -4266,9 +4268,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
bp->dev->mtu + ETH_OVREHEAD); bp->dev->mtu + ETH_OVREHEAD);
for_each_queue(bp, j) { for_each_queue(bp, j) {
for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { struct bnx2x_fastpath *fp = &bp->fp[j];
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb = fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, bp->rx_buf_size); netdev_alloc_skb(bp->dev, bp->rx_buf_size);
if (!fp->tpa_pool[i].skb) { if (!fp->tpa_pool[i].skb) {
...@@ -4348,8 +4350,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) ...@@ -4348,8 +4350,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
BNX2X_ERR("disabling TPA for queue[%d]\n", j); BNX2X_ERR("disabling TPA for queue[%d]\n", j);
/* Cleanup already allocated elements */ /* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp, ring_prod); bnx2x_free_rx_sge_range(bp, fp, ring_prod);
bnx2x_free_tpa_pool(bp, fp, bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
ETH_MAX_AGGREGATION_QUEUES_E1H);
fp->disable_tpa = 1; fp->disable_tpa = 1;
ring_prod = 0; ring_prod = 0;
break; break;
...@@ -5772,6 +5773,7 @@ static void bnx2x_free_mem(struct bnx2x *bp) ...@@ -5772,6 +5773,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
NUM_RCQ_BD); NUM_RCQ_BD);
/* SGE ring */ /* SGE ring */
BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
bnx2x_fp(bp, i, rx_sge_mapping), bnx2x_fp(bp, i, rx_sge_mapping),
BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
...@@ -5949,7 +5951,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) ...@@ -5949,7 +5951,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dev_kfree_skb(skb); dev_kfree_skb(skb);
} }
if (!fp->disable_tpa) if (!fp->disable_tpa)
bnx2x_free_tpa_pool(bp, fp, bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
ETH_MAX_AGGREGATION_QUEUES_E1 :
ETH_MAX_AGGREGATION_QUEUES_E1H); ETH_MAX_AGGREGATION_QUEUES_E1H);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment