Commit 83b2d939 authored by Christophe JAILLET, committed by David S. Miller

net: jme: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 05fbeb21
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -734,17 +734,17 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
 	if (unlikely(!skb))
 		return -ENOMEM;
 
-	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
-			       offset_in_page(skb->data), skb_tailroom(skb),
-			       PCI_DMA_FROMDEVICE);
-	if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+	mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), skb_tailroom(skb),
+			       DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
 	if (likely(rxbi->mapping))
-		pci_unmap_page(jme->pdev, rxbi->mapping,
-			       rxbi->len, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
+			       DMA_FROM_DEVICE);
 
 	rxbi->skb = skb;
 	rxbi->len = skb_tailroom(skb);
@@ -760,10 +760,8 @@ jme_free_rx_buf(struct jme_adapter *jme, int i)
 	rxbi += i;
 
 	if (rxbi->skb) {
-		pci_unmap_page(jme->pdev,
-				 rxbi->mapping,
-				 rxbi->len,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
+			       DMA_FROM_DEVICE);
 		dev_kfree_skb(rxbi->skb);
 		rxbi->skb = NULL;
 		rxbi->mapping = 0;
@@ -1005,16 +1003,12 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 	rxbi += idx;
 
 	skb = rxbi->skb;
-	pci_dma_sync_single_for_cpu(jme->pdev,
-					rxbi->mapping,
-					rxbi->len,
-					PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len,
+				DMA_FROM_DEVICE);
 
 	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
-		pci_dma_sync_single_for_device(jme->pdev,
-						rxbi->mapping,
-						rxbi->len,
-						PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping,
+					   rxbi->len, DMA_FROM_DEVICE);
 
 		++(NET_STAT(jme).rx_dropped);
 	} else {
@@ -1453,10 +1447,9 @@ static void jme_tx_clean_tasklet(struct tasklet_struct *t)
 			ttxbi = txbi + ((i + j) & (mask));
 			txdesc[(i + j) & (mask)].dw[0] = 0;
 
-			pci_unmap_page(jme->pdev,
-					 ttxbi->mapping,
-					 ttxbi->len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_page(&jme->pdev->dev,
+				       ttxbi->mapping, ttxbi->len,
+				       DMA_TO_DEVICE);
 
 			ttxbi->mapping = 0;
 			ttxbi->len = 0;
@@ -1966,19 +1959,13 @@ jme_fill_tx_map(struct pci_dev *pdev,
 {
 	dma_addr_t dmaaddr;
 
-	dmaaddr = pci_map_page(pdev,
-				page,
-				page_offset,
-				len,
-				PCI_DMA_TODEVICE);
+	dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len,
+			       DMA_TO_DEVICE);
 
-	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+	if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr)))
 		return -EINVAL;
 
-	pci_dma_sync_single_for_device(pdev,
-				       dmaaddr,
-				       len,
-				       PCI_DMA_TODEVICE);
+	dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE);
 
 	txdesc->dw[0] = 0;
 	txdesc->dw[1] = 0;
@@ -2003,10 +1990,8 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
 	for (j = 0 ; j < count ; j++) {
 		ctxbi = txbi + ((startidx + j + 2) & (mask));
 
-		pci_unmap_page(jme->pdev,
-				ctxbi->mapping,
-				ctxbi->len,
-				PCI_DMA_TODEVICE);
+		dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len,
+			       DMA_TO_DEVICE);
 
 		ctxbi->mapping = 0;
 		ctxbi->len = 0;
@@ -2859,18 +2844,15 @@ static int
 jme_pci_dma64(struct pci_dev *pdev)
 {
 	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-			return 1;
+	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+		return 1;
 
 	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
-		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
-			return 1;
+	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+		return 1;
 
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
-		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
-			return 0;
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+		return 0;
 
 	return -1;
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment