Commit 718e8898 authored by FUJITA Tomonori, committed by John W. Linville

b43: replace the ssb_dma API with the generic DMA API

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Stefano Brivio <stefano.brivio@polimi.it>
Cc: John W. Linville <linville@tuxdriver.com>
Acked-by: Michael Buesch <mb@bu3sch.de>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 4e803132
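Most of the hunks below make the same mechanical substitution: each ssb_dma_* wrapper is replaced by the corresponding generic dma_* call, which now receives the underlying struct device that actually performs DMA (reached through the ssb_device's dma_dev pointer) instead of the struct ssb_device itself. The two exceptions are free_ringmemory(), where dma_free_coherent() takes no GFP flags argument, and b43_dma_set_mask(), discussed after the diff. A minimal sketch of the calling-convention change, assuming only what the diff shows; the helper name map_tx_buf is illustrative and not part of the driver:

#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

/* Illustration of the calling-convention change; not driver code. */
static dma_addr_t map_tx_buf(struct ssb_device *sdev, void *buf, size_t len)
{
	/* old: return ssb_dma_map_single(sdev, buf, len, DMA_TO_DEVICE); */
	return dma_map_single(sdev->dma_dev, buf, len, DMA_TO_DEVICE);
}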
@@ -333,10 +333,10 @@ static inline
 	dma_addr_t dmaaddr;
 
 	if (tx) {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_TO_DEVICE);
 	} else {
-		dmaaddr = ssb_dma_map_single(ring->dev->dev,
+		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
 					 buf, len, DMA_FROM_DEVICE);
 	}
@@ -348,10 +348,10 @@ static inline
 			  dma_addr_t addr, size_t len, int tx)
 {
 	if (tx) {
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_TO_DEVICE);
 	} else {
-		ssb_dma_unmap_single(ring->dev->dev,
+		dma_unmap_single(ring->dev->dev->dma_dev,
 				 addr, len, DMA_FROM_DEVICE);
 	}
 }
@@ -361,7 +361,7 @@ static inline
 			       dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_cpu(ring->dev->dev,
+	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
 				addr, len, DMA_FROM_DEVICE);
 }
@@ -370,7 +370,7 @@ static inline
 			       dma_addr_t addr, size_t len)
 {
 	B43_WARN_ON(ring->tx);
-	ssb_dma_sync_single_for_device(ring->dev->dev,
+	dma_sync_single_for_device(ring->dev->dev->dma_dev,
 				addr, len, DMA_FROM_DEVICE);
 }
@@ -401,7 +401,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 */
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
 					B43_DMA_RINGMEMSIZE,
 					&(ring->dmabase), flags);
 	if (!ring->descbase) {
@@ -420,8 +420,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
@@ -528,7 +528,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 					  dma_addr_t addr,
 					  size_t buffersize, bool dma_to_device)
 {
-	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
 		return 1;
 
 	switch (ring->type) {
@@ -874,7 +874,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 		goto err_kfree_meta;
 
 	/* test for ability to dma to txhdr_cache */
-	dma_test = ssb_dma_map_single(dev->dev,
+	dma_test = dma_map_single(dev->dev->dma_dev,
 				  ring->txhdr_cache,
 				  b43_txhdr_size(dev),
 				  DMA_TO_DEVICE);
@@ -889,7 +889,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 		if (!ring->txhdr_cache)
 			goto err_kfree_meta;
 
-		dma_test = ssb_dma_map_single(dev->dev,
+		dma_test = dma_map_single(dev->dev->dma_dev,
 					  ring->txhdr_cache,
 					  b43_txhdr_size(dev),
 					  DMA_TO_DEVICE);
@@ -903,7 +903,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 		}
 	}
 
-	ssb_dma_unmap_single(dev->dev,
+	dma_unmap_single(dev->dev->dma_dev,
 			 dma_test, b43_txhdr_size(dev),
 			 DMA_TO_DEVICE);
 }
@@ -1018,9 +1018,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 	/* Try to set the DMA mask. If it fails, try falling back to a
 	 * lower mask, as we can always also support a lower one. */
 	while (1) {
-		err = ssb_dma_set_mask(dev->dev, mask);
-		if (!err)
-			break;
+		err = dma_set_mask(dev->dev->dma_dev, mask);
+		if (!err) {
+			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+			if (!err)
+				break;
+		}
 		if (mask == DMA_BIT_MASK(64)) {
 			mask = DMA_BIT_MASK(32);
 			fallback = 1;
...
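The last hunk is the one place where the conversion is more than a rename: ssb_dma_set_mask() also covered the coherent mask, so the open-coded replacement has to call dma_set_coherent_mask() in addition to dma_set_mask() before accepting a mask, falling back to a narrower mask when either call fails. A sketch of that pattern under simplifying assumptions: it takes a plain struct device rather than b43's wldev, and the fallback chain is reduced to 64 -> 32 bits for brevity (the function name set_dma_masks is illustrative only):

#include <linux/dma-mapping.h>

/* Illustrative only: set both the streaming and the coherent DMA mask,
 * stepping down from 64-bit to 32-bit if the wider mask is rejected. */
static int set_dma_masks(struct device *dev)
{
	u64 mask = DMA_BIT_MASK(64);
	int err;

	while (1) {
		err = dma_set_mask(dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev, mask);
			if (!err)
				return 0;	/* both masks accepted */
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			continue;	/* retry with the narrower mask */
		}
		return err;	/* no usable mask */
	}
}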