Commit 5911419f authored by Christophe JAILLET, committed by David S. Miller

tulip: windbond-840: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.

When memory is allocated in 'alloc_ringdesc()' GFP_KERNEL can be used
because it is only called from 'netdev_open()' which is a '.ndo_open'
function. Such functions are synchronized using the rtnl_lock() semaphore
and no lock is taken in between.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b49db89e
@@ -364,7 +364,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 	irq = pdev->irq;
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		pr_warn("Device %s disabled due to DMA limitations\n",
 			pci_name(pdev));
 		return -EIO;

@@ -802,8 +802,9 @@ static void init_rxtx_rings(struct net_device *dev)
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
-		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
-					np->rx_buf_sz,PCI_DMA_FROMDEVICE);
+		np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
+						np->rx_buf_sz,
+						DMA_FROM_DEVICE);
 		np->rx_ring[i].buffer1 = np->rx_addr[i];
 		np->rx_ring[i].status = DescOwned;

@@ -833,20 +834,17 @@ static void free_rxtx_rings(struct netdev_private *np)
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		np->rx_ring[i].status = 0;
 		if (np->rx_skbuff[i]) {
-			pci_unmap_single(np->pci_dev,
-						np->rx_addr[i],
-						np->rx_skbuff[i]->len,
-						PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
+					 np->rx_skbuff[i]->len,
+					 DMA_FROM_DEVICE);
 			dev_kfree_skb(np->rx_skbuff[i]);
 		}
 		np->rx_skbuff[i] = NULL;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		if (np->tx_skbuff[i]) {
-			pci_unmap_single(np->pci_dev,
-						np->tx_addr[i],
-						np->tx_skbuff[i]->len,
-						PCI_DMA_TODEVICE);
+			dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
+					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
 			dev_kfree_skb(np->tx_skbuff[i]);
 		}
 		np->tx_skbuff[i] = NULL;

@@ -964,10 +962,10 @@ static int alloc_ringdesc(struct net_device *dev)
 	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
-	np->rx_ring = pci_alloc_consistent(np->pci_dev,
-			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
-			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
-			&np->ring_dma_addr);
+	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+					 sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+					 sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+					 &np->ring_dma_addr, GFP_KERNEL);
 	if(!np->rx_ring)
 		return -ENOMEM;
 	init_rxtx_rings(dev);

@@ -976,10 +974,10 @@ static int alloc_ringdesc(struct net_device *dev)
 static void free_ringdesc(struct netdev_private *np)
 {
-	pci_free_consistent(np->pci_dev,
-			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
-			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
-			np->rx_ring, np->ring_dma_addr);
+	dma_free_coherent(&np->pci_dev->dev,
+			  sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+			  sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+			  np->rx_ring, np->ring_dma_addr);
 }

@@ -994,8 +992,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	/* Calculate the next Tx descriptor entry. */
 	entry = np->cur_tx % TX_RING_SIZE;
-	np->tx_addr[entry] = pci_map_single(np->pci_dev,
-				skb->data,skb->len, PCI_DMA_TODEVICE);
+	np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+					    skb->len, DMA_TO_DEVICE);
 	np->tx_skbuff[entry] = skb;
 	np->tx_ring[entry].buffer1 = np->tx_addr[entry];

@@ -1078,9 +1076,8 @@ static void netdev_tx_done(struct net_device *dev)
 				np->stats.tx_packets++;
 			}
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,np->tx_addr[entry],
-						np->tx_skbuff[entry]->len,
-						PCI_DMA_TODEVICE);
+			dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
+					 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
 			np->tx_q_bytes -= np->tx_skbuff[entry]->len;
 			dev_kfree_skb_irq(np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;

@@ -1217,18 +1214,21 @@ static int netdev_rx(struct net_device *dev)
 			if (pkt_len < rx_copybreak &&
 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
-							    np->rx_skbuff[entry]->len,
-							    PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(&np->pci_dev->dev,
+							np->rx_addr[entry],
+							np->rx_skbuff[entry]->len,
+							DMA_FROM_DEVICE);
 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
 				skb_put(skb, pkt_len);
-				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
-							       np->rx_skbuff[entry]->len,
-							       PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&np->pci_dev->dev,
+							   np->rx_addr[entry],
+							   np->rx_skbuff[entry]->len,
+							   DMA_FROM_DEVICE);
 			} else {
-				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
-						 np->rx_skbuff[entry]->len,
-						 PCI_DMA_FROMDEVICE);
+				dma_unmap_single(&np->pci_dev->dev,
+						 np->rx_addr[entry],
+						 np->rx_skbuff[entry]->len,
+						 DMA_FROM_DEVICE);
 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
 			}

@@ -1258,9 +1258,10 @@ static int netdev_rx(struct net_device *dev)
 		np->rx_skbuff[entry] = skb;
 		if (skb == NULL)
 			break;			/* Better luck next round. */
-		np->rx_addr[entry] = pci_map_single(np->pci_dev,
-					skb->data,
-					np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
+						    skb->data,
+						    np->rx_buf_sz,
+						    DMA_FROM_DEVICE);
 		np->rx_ring[entry].buffer1 = np->rx_addr[entry];
 	}
 	wmb();
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment