Commit 4489d8f5 authored by Christophe JAILLET, committed by David S. Miller

net: chelsio: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.
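As an illustration of that hand edit, here is a minimal sketch of the resulting pattern in a probe() routine (not copied verbatim from any single file in this patch; the error label is hypothetical):

        /* Before: two calls through the pci-dma-compat wrappers.
         *
         *      err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
         *      if (!err)
         *              err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
         *
         * After: a single call that sets both the streaming and the
         * coherent DMA mask on the underlying struct device.
         */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_release_regions;       /* hypothetical error label */
        }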

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
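For reference, a semantic patch like the one above is normally applied with Coccinelle's spatch tool, for example (the .cocci file name is a placeholder and not part of this commit; the directory is the chelsio driver tree this patch touches):

    spatch --sp-file pci_dma_compat.cocci --in-place --dir drivers/net/ethernet/chelsio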
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent df70303d
@@ -3235,15 +3235,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 goto out_disable_device;
         }
-        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                 pci_using_dac = 1;
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (err) {
-                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
-                                "coherent allocations\n");
-                        goto out_release_regions;
-                }
-        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+        } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
                 dev_err(&pdev->dev, "no usable DMA configuration\n");
                 goto out_release_regions;
         }
@@ -244,8 +244,8 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
         frag_idx = d->fragidx;
         if (frag_idx == 0 && skb_headlen(skb)) {
-                pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
-                                 skb_headlen(skb), PCI_DMA_TODEVICE);
+                dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
+                                 skb_headlen(skb), DMA_TO_DEVICE);
                 j = 1;
         }
@@ -253,9 +253,9 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
         nfrags = skb_shinfo(skb)->nr_frags;
         while (frag_idx < nfrags && curflit < WR_FLITS) {
-                pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
+                dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
                                skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
-                               PCI_DMA_TODEVICE);
+                               DMA_TO_DEVICE);
                 j ^= 1;
                 if (j == 0) {
                         sgp++;
@@ -355,15 +355,14 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
         if (q->use_pages && d->pg_chunk.page) {
                 (*d->pg_chunk.p_cnt)--;
                 if (!*d->pg_chunk.p_cnt)
-                        pci_unmap_page(pdev,
-                                       d->pg_chunk.mapping,
-                                       q->alloc_size, PCI_DMA_FROMDEVICE);
+                        dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
+                                       q->alloc_size, DMA_FROM_DEVICE);
                 put_page(d->pg_chunk.page);
                 d->pg_chunk.page = NULL;
         } else {
-                pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
-                                 q->buf_size, PCI_DMA_FROMDEVICE);
+                dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
+                                 q->buf_size, DMA_FROM_DEVICE);
                 kfree_skb(d->skb);
                 d->skb = NULL;
         }
@@ -414,8 +413,8 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 {
         dma_addr_t mapping;
-        mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-        if (unlikely(pci_dma_mapping_error(pdev, mapping)))
+        mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
+        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                 return -ENOMEM;
         dma_unmap_addr_set(sd, dma_addr, mapping);
@@ -453,9 +452,9 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
                 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
                                     SGE_PG_RSVD;
                 q->pg_chunk.offset = 0;
-                mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
-                                       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-                if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
+                mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
+                                       0, q->alloc_size, DMA_FROM_DEVICE);
+                if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
                         __free_pages(q->pg_chunk.page, order);
                         q->pg_chunk.page = NULL;
                         return -EIO;
@@ -522,9 +521,9 @@ nomem: q->alloc_failed++;
                         dma_unmap_addr_set(sd, dma_addr, mapping);
                         add_one_rx_chunk(mapping, d, q->gen);
-                        pci_dma_sync_single_for_device(adap->pdev, mapping,
+                        dma_sync_single_for_device(&adap->pdev->dev, mapping,
                                                    q->buf_size - SGE_PG_RSVD,
-                                                   PCI_DMA_FROMDEVICE);
+                                                   DMA_FROM_DEVICE);
                 } else {
                         void *buf_start;
@@ -793,13 +792,13 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
                 skb = alloc_skb(len, GFP_ATOMIC);
                 if (likely(skb != NULL)) {
                         __skb_put(skb, len);
-                        pci_dma_sync_single_for_cpu(adap->pdev,
-                                        dma_unmap_addr(sd, dma_addr), len,
-                                        PCI_DMA_FROMDEVICE);
+                        dma_sync_single_for_cpu(&adap->pdev->dev,
+                                                dma_unmap_addr(sd, dma_addr),
+                                                len, DMA_FROM_DEVICE);
                         memcpy(skb->data, sd->skb->data, len);
-                        pci_dma_sync_single_for_device(adap->pdev,
-                                        dma_unmap_addr(sd, dma_addr), len,
-                                        PCI_DMA_FROMDEVICE);
+                        dma_sync_single_for_device(&adap->pdev->dev,
+                                                   dma_unmap_addr(sd, dma_addr),
+                                                   len, DMA_FROM_DEVICE);
                 } else if (!drop_thres)
                         goto use_orig_buf;
 recycle:
@@ -813,8 +812,8 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
                 goto recycle;
 use_orig_buf:
-        pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
-                         fl->buf_size, PCI_DMA_FROMDEVICE);
+        dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
+                         fl->buf_size, DMA_FROM_DEVICE);
         skb = sd->skb;
         skb_put(skb, len);
         __refill_fl(adap, fl);
@@ -854,12 +853,11 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
                 newskb = alloc_skb(len, GFP_ATOMIC);
                 if (likely(newskb != NULL)) {
                         __skb_put(newskb, len);
-                        pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
-                                        PCI_DMA_FROMDEVICE);
+                        dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
+                                                len, DMA_FROM_DEVICE);
                         memcpy(newskb->data, sd->pg_chunk.va, len);
-                        pci_dma_sync_single_for_device(adap->pdev, dma_addr,
-                                        len,
-                                        PCI_DMA_FROMDEVICE);
+                        dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
+                                                   len, DMA_FROM_DEVICE);
                 } else if (!drop_thres)
                         return NULL;
 recycle:
@@ -883,14 +881,12 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
                 goto recycle;
         }
-        pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
-                                    PCI_DMA_FROMDEVICE);
+        dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
+                                DMA_FROM_DEVICE);
         (*sd->pg_chunk.p_cnt)--;
         if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
-                pci_unmap_page(adap->pdev,
-                               sd->pg_chunk.mapping,
-                               fl->alloc_size,
-                               PCI_DMA_FROMDEVICE);
+                dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
+                               fl->alloc_size, DMA_FROM_DEVICE);
         if (!skb) {
                 __skb_put(newskb, SGE_RX_PULL_LEN);
                 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@@ -968,9 +964,9 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
         const struct skb_shared_info *si;
         if (skb_headlen(skb)) {
-                *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-                                       PCI_DMA_TODEVICE);
-                if (pci_dma_mapping_error(pdev, *addr))
+                *addr = dma_map_single(&pdev->dev, skb->data,
+                                       skb_headlen(skb), DMA_TO_DEVICE);
+                if (dma_mapping_error(&pdev->dev, *addr))
                         goto out_err;
                 addr++;
         }
@@ -981,7 +977,7 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
         for (fp = si->frags; fp < end; fp++) {
                 *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
                                          DMA_TO_DEVICE);
-                if (pci_dma_mapping_error(pdev, *addr))
+                if (dma_mapping_error(&pdev->dev, *addr))
                         goto unwind;
                 addr++;
         }
@@ -992,7 +988,8 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
                 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
                                DMA_TO_DEVICE);
-        pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
+        dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
+                         DMA_TO_DEVICE);
 out_err:
         return -ENOMEM;
 }
@@ -1592,13 +1589,14 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
         p = dui->addr;
         if (skb_tail_pointer(skb) - skb_transport_header(skb))
-                pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
-                                 skb_transport_header(skb), PCI_DMA_TODEVICE);
+                dma_unmap_single(&dui->pdev->dev, *p++,
+                                 skb_tail_pointer(skb) - skb_transport_header(skb),
+                                 DMA_TO_DEVICE);
         si = skb_shinfo(skb);
         for (i = 0; i < si->nr_frags; i++)
-                pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
-                               PCI_DMA_TODEVICE);
+                dma_unmap_page(&dui->pdev->dev, *p++,
+                               skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
 }
 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
@@ -2153,17 +2151,14 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
         fl->credits--;
-        pci_dma_sync_single_for_cpu(adap->pdev,
+        dma_sync_single_for_cpu(&adap->pdev->dev,
                                 dma_unmap_addr(sd, dma_addr),
-                                fl->buf_size - SGE_PG_RSVD,
-                                PCI_DMA_FROMDEVICE);
+                                fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
         (*sd->pg_chunk.p_cnt)--;
         if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
-                pci_unmap_page(adap->pdev,
-                               sd->pg_chunk.mapping,
-                               fl->alloc_size,
-                               PCI_DMA_FROMDEVICE);
+                dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
+                               fl->alloc_size, DMA_FROM_DEVICE);
         if (!skb) {
                 put_page(sd->pg_chunk.page);
@@ -4008,7 +4008,7 @@ static void adap_free_hma_mem(struct adapter *adapter)
         if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
                 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
-                             adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
+                             adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
                 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
         }
@@ -6687,16 +6687,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                 return 0;
         }
-        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                 highdma = true;
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (err) {
-                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
-                                "coherent allocations\n");
-                        goto out_free_adapter;
-                }
         } else {
-                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                 if (err) {
                         dev_err(&pdev->dev, "no usable DMA configuration\n");
                         goto out_free_adapter;
@@ -443,7 +443,7 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
                 if (is_buf_mapped(d))
                         dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
                                        get_buf_size(adap, d),
-                                       PCI_DMA_FROMDEVICE);
+                                       DMA_FROM_DEVICE);
                 put_page(d->page);
                 d->page = NULL;
                 if (++q->cidx == q->size)
@@ -469,7 +469,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
         if (is_buf_mapped(d))
                 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-                               get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
+                               get_buf_size(adap, d), DMA_FROM_DEVICE);
         d->page = NULL;
         if (++q->cidx == q->size)
                 q->cidx = 0;
@@ -566,7 +566,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                 mapping = dma_map_page(adap->pdev_dev, pg, 0,
                                        PAGE_SIZE << s->fl_pg_order,
-                                       PCI_DMA_FROMDEVICE);
+                                       DMA_FROM_DEVICE);
                 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
                         __free_pages(pg, s->fl_pg_order);
                         q->mapping_err++;
@@ -596,7 +596,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
         }
         mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
-                               PCI_DMA_FROMDEVICE);
+                               DMA_FROM_DEVICE);
         if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
                 put_page(pg);
                 q->mapping_err++;
@@ -2917,17 +2917,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
          * Set up our DMA mask: try for 64-bit address masking first and
          * fall back to 32-bit if we can't get 64 bits ...
          */
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
         if (err == 0) {
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (err) {
-                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
-                                " coherent allocations\n");
-                        goto err_release_regions;
-                }
                 pci_using_dac = 1;
         } else {
-                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                 if (err != 0) {
                         dev_err(&pdev->dev, "no usable DMA configuration\n");
                         goto err_release_regions;
@@ -478,7 +478,7 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
                 if (is_buf_mapped(sdesc))
                         dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
                                        get_buf_size(adapter, sdesc),
-                                       PCI_DMA_FROMDEVICE);
+                                       DMA_FROM_DEVICE);
                 put_page(sdesc->page);
                 sdesc->page = NULL;
                 if (++fl->cidx == fl->size)
@@ -507,7 +507,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
         if (is_buf_mapped(sdesc))
                 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
                                get_buf_size(adapter, sdesc),
-                               PCI_DMA_FROMDEVICE);
+                               DMA_FROM_DEVICE);
         sdesc->page = NULL;
         if (++fl->cidx == fl->size)
                 fl->cidx = 0;
@@ -644,7 +644,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
                                         PAGE_SIZE << s->fl_pg_order,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                         /*
                          * We've run out of DMA mapping space. Free up the
@@ -682,7 +682,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                 poison_buf(page, PAGE_SIZE);
                 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                         put_page(page);
                         break;