Commit 5d63ccea authored by Christophe JAILLET, committed by David S. Miller

starfire: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.
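
(For context, and not part of this patch: the wrappers in
pci-dma-compat.h are thin static inlines that forward to the dma_ API,
which is why the conversion is mechanical apart from the GFP_ flag.
A sketch, paraphrased from memory:

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}

Note that the wrapper hard-codes GFP_ATOMIC, which is why call sites
converted to dma_alloc_coherent() must choose an explicit flag.)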

The patch has been generated with the Coccinelle script below and has been
hand-modified to replace GFP_ with an appropriate flag.
It has been compile-tested.

When memory is allocated in 'netdev_open()', GFP_ATOMIC must be used
because 'netdev_open()' can be called from a .ndo_tx_timeout handler,
that is, with the 'netif_tx_lock' spinlock held.
The call chain is:
  --> tx_timeout                 (.ndo_tx_timeout function)
    --> netdev_open
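
As an illustration (this is the patched hunk from netdev_open(), shown
here for the locking argument):

	/* May run with netif_tx_lock held, i.e. in atomic context,
	 * where GFP_KERNEL could sleep; GFP_ATOMIC never sleeps but
	 * may fail, hence the -ENOMEM fallback.
	 */
	np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
					   np->queue_mem_size,
					   &np->queue_mem_dma, GFP_ATOMIC);
	if (np->queue_mem == NULL) {
		free_irq(irq, dev);
		return -ENOMEM;
	}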

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
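
For reference, a script like this is typically applied with Coccinelle's
spatch tool (the .cocci file name below is hypothetical):

	spatch --sp-file pci_to_dma.cocci --in-place \
	       drivers/net/ethernet/adaptec/starfire.c
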
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c8acc09c
@@ -886,7 +886,9 @@ static int netdev_open(struct net_device *dev)
 	tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
 	rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
 	np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
-	np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
+	np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
+					   np->queue_mem_size,
+					   &np->queue_mem_dma, GFP_ATOMIC);
 	if (np->queue_mem == NULL) {
 		free_irq(irq, dev);
 		return -ENOMEM;
@@ -1136,9 +1138,11 @@ static void init_ring(struct net_device *dev)
 		np->rx_info[i].skb = skb;
 		if (skb == NULL)
 			break;
-		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(np->pci_dev,
-					  np->rx_info[i].mapping)) {
+		np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
+							skb->data,
+							np->rx_buf_sz,
+							DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
 			dev_kfree_skb(skb);
 			np->rx_info[i].skb = NULL;
 			break;
@@ -1217,18 +1221,19 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
 			np->tx_info[entry].mapping =
-				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+				dma_map_single(&np->pci_dev->dev, skb->data,
+					       skb_first_frag_len(skb),
+					       DMA_TO_DEVICE);
 		} else {
 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
 			status |= skb_frag_size(this_frag);
 			np->tx_info[entry].mapping =
-				pci_map_single(np->pci_dev,
+				dma_map_single(&np->pci_dev->dev,
 					       skb_frag_address(this_frag),
 					       skb_frag_size(this_frag),
-					       PCI_DMA_TODEVICE);
+					       DMA_TO_DEVICE);
 		}
-		if (pci_dma_mapping_error(np->pci_dev,
-					  np->tx_info[entry].mapping)) {
+		if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
 			dev->stats.tx_dropped++;
 			goto err_out;
 		}
@@ -1271,18 +1276,16 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		entry = prev_tx % TX_RING_SIZE;
 		np->tx_info[entry].skb = NULL;
 		if (i > 0) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 					 np->tx_info[entry].mapping,
-					 skb_first_frag_len(skb),
-					 PCI_DMA_TODEVICE);
+					 skb_first_frag_len(skb), DMA_TO_DEVICE);
 			np->tx_info[entry].mapping = 0;
 			entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
 			for (j = 1; j < i; j++) {
-				pci_unmap_single(np->pci_dev,
+				dma_unmap_single(&np->pci_dev->dev,
 						 np->tx_info[entry].mapping,
-						 skb_frag_size(
-							&skb_shinfo(skb)->frags[j-1]),
-						 PCI_DMA_TODEVICE);
+						 skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
+						 DMA_TO_DEVICE);
 				entry++;
 			}
 		}
@@ -1356,20 +1359,20 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 			u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
 			struct sk_buff *skb = np->tx_info[entry].skb;
 			np->tx_info[entry].skb = NULL;
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 					 np->tx_info[entry].mapping,
 					 skb_first_frag_len(skb),
-					 PCI_DMA_TODEVICE);
+					 DMA_TO_DEVICE);
 			np->tx_info[entry].mapping = 0;
 			np->dirty_tx += np->tx_info[entry].used_slots;
 			entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
 			{
 				int i;
 				for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-					pci_unmap_single(np->pci_dev,
+					dma_unmap_single(&np->pci_dev->dev,
 							 np->tx_info[entry].mapping,
 							 skb_frag_size(&skb_shinfo(skb)->frags[i]),
-							 PCI_DMA_TODEVICE);
+							 DMA_TO_DEVICE);
 					np->dirty_tx++;
 					entry++;
 				}
@@ -1461,16 +1464,18 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 		if (pkt_len < rx_copybreak &&
 		    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 			skb_reserve(skb, 2);	/* 16 byte align the IP header */
-			pci_dma_sync_single_for_cpu(np->pci_dev,
-						    np->rx_info[entry].mapping,
-						    pkt_len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&np->pci_dev->dev,
+						np->rx_info[entry].mapping,
+						pkt_len, DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
-			pci_dma_sync_single_for_device(np->pci_dev,
-						       np->rx_info[entry].mapping,
-						       pkt_len, PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&np->pci_dev->dev,
+						   np->rx_info[entry].mapping,
+						   pkt_len, DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 		} else {
-			pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&np->pci_dev->dev,
+					 np->rx_info[entry].mapping,
+					 np->rx_buf_sz, DMA_FROM_DEVICE);
 			skb = np->rx_info[entry].skb;
 			skb_put(skb, pkt_len);
 			np->rx_info[entry].skb = NULL;
@@ -1588,9 +1593,9 @@ static void refill_rx_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;	/* Better luck next round. */
 		np->rx_info[entry].mapping =
-			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(np->pci_dev,
-					  np->rx_info[entry].mapping)) {
+			dma_map_single(&np->pci_dev->dev, skb->data,
+				       np->rx_buf_sz, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
 			dev_kfree_skb(skb);
 			np->rx_info[entry].skb = NULL;
 			break;
@@ -1963,7 +1968,9 @@ static int netdev_close(struct net_device *dev)
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
 		if (np->rx_info[i].skb != NULL) {
-			pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&np->pci_dev->dev,
+					 np->rx_info[i].mapping,
+					 np->rx_buf_sz, DMA_FROM_DEVICE);
 			dev_kfree_skb(np->rx_info[i].skb);
 		}
 		np->rx_info[i].skb = NULL;
@@ -1973,9 +1980,8 @@ static int netdev_close(struct net_device *dev)
 		struct sk_buff *skb = np->tx_info[i].skb;
 		if (skb == NULL)
 			continue;
-		pci_unmap_single(np->pci_dev,
-				 np->tx_info[i].mapping,
-				 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+		dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
+				 skb_first_frag_len(skb), DMA_TO_DEVICE);
 		np->tx_info[i].mapping = 0;
 		dev_kfree_skb(skb);
 		np->tx_info[i].skb = NULL;
@@ -2018,7 +2024,8 @@ static void starfire_remove_one(struct pci_dev *pdev)
 	unregister_netdev(dev);
 	if (np->queue_mem)
-		pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
+		dma_free_coherent(&pdev->dev, np->queue_mem_size,
+				  np->queue_mem, np->queue_mem_dma);
 	/* XXX: add wakeup code -- requires firmware for MagicPacket */