Commit bf7bec46 authored by Christophe JAILLET, committed by David S. Miller

vmxnet3: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.
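
For context, these compat helpers are thin forwarding inlines, so call sites
can be converted mechanically. An abridged sketch of the kind of definitions
the header carries (not the complete file):

/* include/linux/pci-dma-compat.h (abridged): each 'pci_*' helper simply
 * forwards to the generic DMA API on the underlying struct device. */
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	return dma_map_single(&hwdev->dev, ptr, size,
			      (enum dma_data_direction)direction);
}

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		     dma_addr_t *dma_handle)
{
	/* Note the hard-coded GFP_ATOMIC: the generic API takes an explicit
	 * gfp_t, which is why the script below leaves 'GFP_' to be completed
	 * by hand at each call site. */
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}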

The patch has been generated with the coccinelle script below.

It has been hand-modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable. This is
less verbose.

The explicit 'err = -EIO;' has been removed because
'dma_set_mask_and_coherent()' returns 0 or -EIO, so its return code can be
used directly.
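
A minimal sketch of the resulting probe-time idiom (it mirrors the hunk at
line 3438 in the diff below):

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		dma64 = true;
	} else {
		/* dma_set_mask() also returns 0 or -EIO, so feed 'err'
		 * directly instead of assigning -EIO by hand. */
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_set_mask;
		}
		dma64 = false;
	}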

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
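
Taken together, the rules above rewrite a typical call site as follows (a
hypothetical minimal example, where 'pdev' is a struct pci_dev * and
'buf'/'len' are illustrative):

	/* Before: legacy PCI wrapper and direction flag. */
	dma_addr_t addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	/* After: generic DMA API on the device embedded in the pci_dev. */
	dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);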
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 75bacb6d
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -314,10 +314,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
 {
 	if (tbi->map_type == VMXNET3_MAP_SINGLE)
 		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 	else if (tbi->map_type == VMXNET3_MAP_PAGE)
 		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
-			       PCI_DMA_TODEVICE);
+			       DMA_TO_DEVICE);
 	else
 		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
@@ -585,7 +585,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 				rbi->dma_addr = dma_map_single(
 						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
-						PCI_DMA_FROMDEVICE);
+						DMA_FROM_DEVICE);
 				if (dma_mapping_error(&adapter->pdev->dev,
 						      rbi->dma_addr)) {
 					dev_kfree_skb_any(rbi->skb);
@@ -609,7 +609,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 				rbi->dma_addr = dma_map_page(
 						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
-						PCI_DMA_FROMDEVICE);
+						DMA_FROM_DEVICE);
 				if (dma_mapping_error(&adapter->pdev->dev,
 						      rbi->dma_addr)) {
 					put_page(rbi->page);
@@ -723,7 +723,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi->map_type = VMXNET3_MAP_SINGLE;
 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
-				PCI_DMA_TODEVICE);
+				DMA_TO_DEVICE);
 		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
 			return -EFAULT;
@@ -1449,7 +1449,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			new_dma_addr =
 				dma_map_single(&adapter->pdev->dev,
 					       new_skb->data, rbi->len,
-					       PCI_DMA_FROMDEVICE);
+					       DMA_FROM_DEVICE);
 			if (dma_mapping_error(&adapter->pdev->dev,
 					      new_dma_addr)) {
 				dev_kfree_skb(new_skb);
@@ -1467,7 +1467,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			dma_unmap_single(&adapter->pdev->dev,
 					 rbi->dma_addr,
 					 rbi->len,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			/* Immediate refill */
 			rbi->skb = new_skb;
@@ -1546,7 +1546,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			new_dma_addr = dma_map_page(&adapter->pdev->dev,
 						    new_page,
 						    0, PAGE_SIZE,
-						    PCI_DMA_FROMDEVICE);
+						    DMA_FROM_DEVICE);
 			if (dma_mapping_error(&adapter->pdev->dev,
 					      new_dma_addr)) {
 				put_page(new_page);
@@ -1559,7 +1559,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			dma_unmap_page(&adapter->pdev->dev,
 				       rbi->dma_addr, rbi->len,
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 			vmxnet3_append_frag(ctx->skb, rcd, rbi);
@@ -1677,13 +1677,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 					rq->buf_info[ring_idx][i].skb) {
 				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
-						 rxd->len, PCI_DMA_FROMDEVICE);
+						 rxd->len, DMA_FROM_DEVICE);
 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
 				rq->buf_info[ring_idx][i].skb = NULL;
 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
 					rq->buf_info[ring_idx][i].page) {
 				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
-					       rxd->len, PCI_DMA_FROMDEVICE);
+					       rxd->len, DMA_FROM_DEVICE);
 				put_page(rq->buf_info[ring_idx][i].page);
 				rq->buf_info[ring_idx][i].page = NULL;
 			}
@@ -2419,7 +2419,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 						&adapter->pdev->dev,
 						new_table,
 						sz,
-						PCI_DMA_TODEVICE);
+						DMA_TO_DEVICE);
 			if (!dma_mapping_error(&adapter->pdev->dev,
 					       new_table_pa)) {
 				new_mode |= VMXNET3_RXM_MCAST;
@@ -2455,7 +2455,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	if (new_table_pa_valid)
 		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
-				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
+				 rxConf->mfTableLen, DMA_TO_DEVICE);
 	kfree(new_table);
 }
@@ -3438,19 +3438,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			dev_err(&pdev->dev,
-				"pci_set_consistent_dma_mask failed\n");
-			err = -EIO;
-			goto err_set_mask;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
 		dma64 = true;
 	} else {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
-			dev_err(&pdev->dev,
-				"pci_set_dma_mask failed\n");
-			err = -EIO;
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "dma_set_mask failed\n");
 			goto err_set_mask;
 		}
 		dma64 = false;
@@ -3459,7 +3452,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	spin_lock_init(&adapter->cmd_lock);
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
-					     PCI_DMA_TODEVICE);
+					     DMA_TO_DEVICE);
 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
 		dev_err(&pdev->dev, "Failed to map dma\n");
 		err = -EFAULT;
@@ -3713,7 +3706,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 			  adapter->shared, adapter->shared_pa);
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
-			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
 err_set_mask:
 	free_netdev(netdev);
 	return err;
@@ -3781,7 +3774,7 @@ vmxnet3_remove_device(struct pci_dev *pdev)
 			  sizeof(struct Vmxnet3_DriverShared),
 			  adapter->shared, adapter->shared_pa);
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
-			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+			 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
 	free_netdev(netdev);
 }