Commit 70e3bb50 authored by Divy Le Ray, committed by David S. Miller

cxgb3: fix premature page unmap

Unmap an Rx page only when it is guaranteed that the page will no
longer be used to allocate rx page chunks.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4acc5a8e
@@ -879,7 +879,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
 				    PCI_DMA_FROMDEVICE);
 	(*sd->pg_chunk.p_cnt)--;
-	if (!*sd->pg_chunk.p_cnt)
+	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
 		pci_unmap_page(adap->pdev,
 			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
@@ -2088,7 +2088,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
 				    PCI_DMA_FROMDEVICE);
 	(*sd->pg_chunk.p_cnt)--;
-	if (!*sd->pg_chunk.p_cnt)
+	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
 		pci_unmap_page(adap->pdev,
 			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
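
For context, here is a minimal user-space sketch of the shared-page refcount
pattern this fix depends on. The names pg_chunk, p_cnt, and fl->pg_chunk.page
mirror the driver; everything else (struct page, struct free_list, dma_unmap(),
main()) is a hypothetical stand-in for the real sge.c structures and
pci_unmap_page(), not the driver's actual implementation.

/*
 * Sketch: several rx chunks are carved from one DMA-mapped page and
 * share a counter of outstanding chunks.  The mapping may only be torn
 * down when the counter hits zero AND the free list has stopped
 * allocating chunks from that page.
 */
#include <stdio.h>

struct page { unsigned int refs; };

struct rx_chunk {
	struct page *page;     /* backing page shared by several chunks */
	unsigned int *p_cnt;   /* chunks still outstanding on that page */
};

struct free_list {
	struct rx_chunk pg_chunk; /* chunks are still carved from this page */
};

static void dma_unmap(struct page *pg)
{
	/* stand-in for pci_unmap_page(): tears down the DMA mapping */
	printf("unmapped page, %u refs left\n", pg->refs);
}

/* Per-completion path, mirroring the fixed get_packet_pg()/lro_add_page():
 * drop one reference, but keep the mapping alive while the free list can
 * still hand out new chunks from this same page. */
static void put_rx_chunk(struct free_list *fl, struct rx_chunk *sd)
{
	(*sd->p_cnt)--;
	if (!*sd->p_cnt && sd->page != fl->pg_chunk.page)
		dma_unmap(sd->page);
}

int main(void)
{
	struct page pg = { .refs = 1 };
	struct free_list fl = { .pg_chunk = { &pg, &pg.refs } };
	struct rx_chunk sd = { &pg, &pg.refs };

	/* refs 1 -> 0, but fl can still carve chunks from this page, so
	 * the mapping must survive (the case the fix guards against) */
	put_rx_chunk(&fl, &sd);

	pg.refs++;               /* fl hands out one more chunk from the page */
	fl.pg_chunk.page = NULL; /* ... then moves on to a fresh page */

	put_rx_chunk(&fl, &sd);  /* refs 1 -> 0 and page retired: unmap now */
	return 0;
}

Without the extra sd->pg_chunk.page != fl->pg_chunk.page test, the first
completion above would unmap the page while the free list could still
allocate chunks from it, so later DMA into those chunks would hit a stale
mapping.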