Commit b49db89e authored by Christophe JAILLET, committed by David S. Miller

net: dl2k: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.

When memory is allocated in 'rio_probe1()', GFP_KERNEL can be used because
it is a probe function and no lock is held in the meantime.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fd9e4d6f
...@@ -222,13 +222,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -222,13 +222,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata (pdev, dev); pci_set_drvdata (pdev, dev);
ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma); ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
GFP_KERNEL);
if (!ring_space) if (!ring_space)
goto err_out_iounmap; goto err_out_iounmap;
np->tx_ring = ring_space; np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma; np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma); ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
GFP_KERNEL);
if (!ring_space) if (!ring_space)
goto err_out_unmap_tx; goto err_out_unmap_tx;
np->rx_ring = ring_space; np->rx_ring = ring_space;
...@@ -279,9 +281,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -279,9 +281,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
return 0; return 0;
err_out_unmap_rx: err_out_unmap_rx:
pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
np->rx_ring_dma);
err_out_unmap_tx: err_out_unmap_tx:
pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap: err_out_iounmap:
#ifdef MEM_MAPPING #ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr); pci_iounmap(pdev, np->ioaddr);
...@@ -435,8 +439,9 @@ static void free_list(struct net_device *dev) ...@@ -435,8 +439,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) { for (i = 0; i < RX_RING_SIZE; i++) {
skb = np->rx_skbuff[i]; skb = np->rx_skbuff[i];
if (skb) { if (skb) {
pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]), dma_unmap_single(&np->pdev->dev,
skb->len, PCI_DMA_FROMDEVICE); desc_to_dma(&np->rx_ring[i]),
skb->len, DMA_FROM_DEVICE);
dev_kfree_skb(skb); dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL; np->rx_skbuff[i] = NULL;
} }
...@@ -446,8 +451,9 @@ static void free_list(struct net_device *dev) ...@@ -446,8 +451,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) { for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i]; skb = np->tx_skbuff[i];
if (skb) { if (skb) {
pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]), dma_unmap_single(&np->pdev->dev,
skb->len, PCI_DMA_TODEVICE); desc_to_dma(&np->tx_ring[i]),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb); dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL; np->tx_skbuff[i] = NULL;
} }
...@@ -504,9 +510,8 @@ static int alloc_list(struct net_device *dev) ...@@ -504,9 +510,8 @@ static int alloc_list(struct net_device *dev)
sizeof(struct netdev_desc)); sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */ /* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo = np->rx_ring[i].fraginfo =
cpu_to_le64(pci_map_single( cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
np->pdev, skb->data, np->rx_buf_sz, np->rx_buf_sz, DMA_FROM_DEVICE));
PCI_DMA_FROMDEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
} }
...@@ -672,9 +677,8 @@ rio_timer (struct timer_list *t) ...@@ -672,9 +677,8 @@ rio_timer (struct timer_list *t)
} }
np->rx_skbuff[entry] = skb; np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo = np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
(np->pdev, skb->data, np->rx_buf_sz, np->rx_buf_sz, DMA_FROM_DEVICE));
PCI_DMA_FROMDEVICE));
} }
np->rx_ring[entry].fraginfo |= np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48); cpu_to_le64((u64)np->rx_buf_sz << 48);
...@@ -728,9 +732,8 @@ start_xmit (struct sk_buff *skb, struct net_device *dev) ...@@ -728,9 +732,8 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
((u64)np->vlan << 32) | ((u64)np->vlan << 32) |
((u64)skb->priority << 45); ((u64)skb->priority << 45);
} }
txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
skb->len, skb->len, DMA_TO_DEVICE));
PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
...@@ -827,9 +830,9 @@ rio_free_tx (struct net_device *dev, int irq) ...@@ -827,9 +830,9 @@ rio_free_tx (struct net_device *dev, int irq)
if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone))) if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
break; break;
skb = np->tx_skbuff[entry]; skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev, dma_unmap_single(&np->pdev->dev,
desc_to_dma(&np->tx_ring[entry]), desc_to_dma(&np->tx_ring[entry]), skb->len,
skb->len, PCI_DMA_TODEVICE); DMA_TO_DEVICE);
if (irq) if (irq)
dev_consume_skb_irq(skb); dev_consume_skb_irq(skb);
else else
...@@ -949,25 +952,25 @@ receive_packet (struct net_device *dev) ...@@ -949,25 +952,25 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */ /* Small skbuffs for short packets */
if (pkt_len > copy_thresh) { if (pkt_len > copy_thresh) {
pci_unmap_single (np->pdev, dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc), desc_to_dma(desc),
np->rx_buf_sz, np->rx_buf_sz,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len); skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL; np->rx_skbuff[entry] = NULL;
} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
pci_dma_sync_single_for_cpu(np->pdev, dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc), desc_to_dma(desc),
np->rx_buf_sz, np->rx_buf_sz,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
skb_copy_to_linear_data (skb, skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data, np->rx_skbuff[entry]->data,
pkt_len); pkt_len);
skb_put (skb, pkt_len); skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev, dma_sync_single_for_device(&np->pdev->dev,
desc_to_dma(desc), desc_to_dma(desc),
np->rx_buf_sz, np->rx_buf_sz,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
} }
skb->protocol = eth_type_trans (skb, dev); skb->protocol = eth_type_trans (skb, dev);
#if 0 #if 0
...@@ -1000,9 +1003,8 @@ receive_packet (struct net_device *dev) ...@@ -1000,9 +1003,8 @@ receive_packet (struct net_device *dev)
} }
np->rx_skbuff[entry] = skb; np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo = np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
(np->pdev, skb->data, np->rx_buf_sz, np->rx_buf_sz, DMA_FROM_DEVICE));
PCI_DMA_FROMDEVICE));
} }
np->rx_ring[entry].fraginfo |= np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48); cpu_to_le64((u64)np->rx_buf_sz << 48);
...@@ -1796,9 +1798,9 @@ rio_remove1 (struct pci_dev *pdev) ...@@ -1796,9 +1798,9 @@ rio_remove1 (struct pci_dev *pdev)
struct netdev_private *np = netdev_priv(dev); struct netdev_private *np = netdev_priv(dev);
unregister_netdev (dev); unregister_netdev (dev);
pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
np->rx_ring_dma); np->rx_ring_dma);
pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma); np->tx_ring_dma);
#ifdef MEM_MAPPING #ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr); pci_iounmap(pdev, np->ioaddr);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment