Commit 5738a09d authored by Alexey Khoroshilov, committed by David S. Miller

vmxnet3: fix checks for dma mapping errors

vmxnet3_drv does not check dma_addr with dma_mapping_error()
after mapping DMA memory. The patch adds the missing checks and
handles mapping failures by releasing the affected buffer and
accounting or propagating the error.

Found by Linux Driver Verification project (linuxtesting.org).
Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru>
Acked-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ee9159dd
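The core pattern the patch applies at every map site, shown as a minimal
standalone sketch (example_rx_refill() and struct example_rx_buf are
hypothetical names, not vmxnet3 code): an address returned by
dma_map_single() or dma_map_page() is only usable once dma_mapping_error()
approves it, and the buffer must be released if the check fails.

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct example_rx_buf {
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        unsigned int len;
    };

    static int example_rx_refill(struct device *dev, struct example_rx_buf *rbi)
    {
        rbi->dma_addr = dma_map_single(dev, rbi->skb->data, rbi->len,
                                       DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, rbi->dma_addr)) {
            /* Do not post a bogus address to hardware: free the
             * buffer and let the caller account the failure.
             */
            dev_kfree_skb_any(rbi->skb);
            rbi->skb = NULL;
            return -EFAULT;
        }
        return 0;
    }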
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                         &adapter->pdev->dev,
                         rbi->skb->data, rbi->len,
                         PCI_DMA_FROMDEVICE);
+                if (dma_mapping_error(&adapter->pdev->dev,
+                                      rbi->dma_addr)) {
+                    dev_kfree_skb_any(rbi->skb);
+                    rq->stats.rx_buf_alloc_failure++;
+                    break;
+                }
             } else {
                 /* rx buffer skipped by the device */
             }
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                         &adapter->pdev->dev,
                         rbi->page, 0, PAGE_SIZE,
                         PCI_DMA_FROMDEVICE);
+                if (dma_mapping_error(&adapter->pdev->dev,
+                                      rbi->dma_addr)) {
+                    put_page(rbi->page);
+                    rq->stats.rx_buf_alloc_failure++;
+                    break;
+                }
             } else {
                 /* rx buffers skipped by the device */
             }
             val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
         }

-        BUG_ON(rbi->dma_addr == 0);
         gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
         gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
                                    | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 }

-static void
+static int
 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
         struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
         struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
         tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
                 skb->data + buf_offset, buf_size,
                 PCI_DMA_TODEVICE);
+        if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+            return -EFAULT;

         tbi->len = buf_size;
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
             tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
                                              buf_offset, buf_size,
                                              DMA_TO_DEVICE);
+            if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+                return -EFAULT;

             tbi->len = buf_size;
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
     /* set the last buf_info for the pkt */
     tbi->skb = skb;
     tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+    return 0;
 }
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
     }

     /* fill tx descs related to addr & len */
-    vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+    if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+        goto unlock_drop_pkt;

     /* setup the EOP desc */
     ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
         struct vmxnet3_rx_buf_info *rbi;
         struct sk_buff *skb, *new_skb = NULL;
         struct page *new_page = NULL;
+        dma_addr_t new_dma_addr;
         int num_to_alloc;
         struct Vmxnet3_RxDesc *rxd;
         u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                 skip_page_frags = true;
                 goto rcd_done;
             }
+            new_dma_addr = dma_map_single(&adapter->pdev->dev,
+                                          new_skb->data, rbi->len,
+                                          PCI_DMA_FROMDEVICE);
+            if (dma_mapping_error(&adapter->pdev->dev,
+                                  new_dma_addr)) {
+                dev_kfree_skb(new_skb);
+                /* Skb allocation failed, do not handover this
+                 * skb to stack. Reuse it. Drop the existing pkt
+                 */
+                rq->stats.rx_buf_alloc_failure++;
+                ctx->skb = NULL;
+                rq->stats.drop_total++;
+                skip_page_frags = true;
+                goto rcd_done;
+            }

             dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
                              rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,

             /* Immediate refill */
             rbi->skb = new_skb;
-            rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
-                                           rbi->skb->data, rbi->len,
-                                           PCI_DMA_FROMDEVICE);
+            rbi->dma_addr = new_dma_addr;
             rxd->addr = cpu_to_le64(rbi->dma_addr);
             rxd->len = rbi->len;
             if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                     skip_page_frags = true;
                     goto rcd_done;
                 }
+                new_dma_addr = dma_map_page(&adapter->pdev->dev
+                            , rbi->page,
+                            0, PAGE_SIZE,
+                            PCI_DMA_FROMDEVICE);
+                if (dma_mapping_error(&adapter->pdev->dev,
+                                      new_dma_addr)) {
+                    put_page(new_page);
+                    rq->stats.rx_buf_alloc_failure++;
+                    dev_kfree_skb(ctx->skb);
+                    ctx->skb = NULL;
+                    skip_page_frags = true;
+                    goto rcd_done;
+                }

                 dma_unmap_page(&adapter->pdev->dev,
                                rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,

                 /* Immediate refill */
                 rbi->page = new_page;
-                rbi->dma_addr = dma_map_page(&adapter->pdev->dev
-                            , rbi->page,
-                            0, PAGE_SIZE,
-                            PCI_DMA_FROMDEVICE);
+                rbi->dma_addr = new_dma_addr;
                 rxd->addr = cpu_to_le64(rbi->dma_addr);
                 rxd->len = rbi->len;
             }
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
                     PCI_DMA_TODEVICE);
         }

-        if (new_table_pa) {
+        if (!dma_mapping_error(&adapter->pdev->dev,
+                               new_table_pa)) {
             new_mode |= VMXNET3_RXM_MCAST;
             rxConf->mfTablePA = cpu_to_le64(new_table_pa);
         } else {
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
     adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
                                          sizeof(struct vmxnet3_adapter),
                                          PCI_DMA_TODEVICE);
+    if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+        dev_err(&pdev->dev, "Failed to map dma\n");
+        err = -EFAULT;
+        goto err_dma_map;
+    }
     adapter->shared = dma_alloc_coherent(
                 &adapter->pdev->dev,
                 sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 err_alloc_shared:
     dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
                      sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
     free_netdev(netdev);
     return err;
 }
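On the transmit side, the new int return of vmxnet3_map_pkt() lets the
xmit handler bail out via the existing unlock_drop_pkt label. A hedged
sketch of that consumption pattern, using illustrative names
(example_xmit(), example_map_pkt()) rather than the driver's own: a packet
whose mapping fails is dropped and NETDEV_TX_OK is returned, because
NETDEV_TX_BUSY would make the stack requeue an skb that can never be
mapped.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical mapper: returns 0 on success, -EFAULT on DMA map failure. */
    int example_map_pkt(struct net_device *netdev, struct sk_buff *skb);

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev)
    {
        if (example_map_pkt(netdev, skb)) {
            /* Mapping will never succeed for this skb; drop it and
             * report NETDEV_TX_OK so the stack does not retry.
             */
            dev_kfree_skb_any(skb);
            netdev->stats.tx_dropped++;
            return NETDEV_TX_OK;
        }
        /* ... fill descriptors and notify the device here ... */
        return NETDEV_TX_OK;
    }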