Commit ea72ab22 authored by Michael Buesch, committed by John W. Linville

[PATCH] bcm43xx: sync with svn.berlios.de

Signed-off-by: Michael Buesch <mbuesch@freenet.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 70e5e983
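
Several hunks below touch generate_cookie()/parse_cookie(), which map a TX-status report back to a DMA ring and descriptor slot through a 16-bit cookie. As an illustrative aside only (not part of the patch), a minimal userspace sketch of such a packing scheme might look like the following; the 4-bit ring index in the top nibble is taken from the `cookie & 0xF000` switch visible in the diff, while the 12-bit slot field is an assumption made for this sketch.

/* Illustrative sketch (not from the patch): pack a DMA ring index and a
 * descriptor slot into a 16-bit TX cookie and unpack it again, mirroring
 * the 0xF000 ring-selection mask used by parse_cookie() in the diff. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t cookie_pack(unsigned int ring_index, unsigned int slot)
{
        assert(ring_index <= 0xF);      /* tx_ring0..tx_ring3 use 0x0..0x3 */
        assert(slot <= 0x0FFF);         /* assumed 12-bit slot field */
        return (uint16_t)((ring_index << 12) | slot);
}

static void cookie_unpack(uint16_t cookie, unsigned int *ring_index,
                          unsigned int *slot)
{
        *ring_index = (cookie & 0xF000) >> 12;  /* same mask as parse_cookie() */
        *slot = cookie & 0x0FFF;
}

int main(void)
{
        unsigned int ring, slot;

        cookie_unpack(cookie_pack(3, 42), &ring, &slot);
        printf("ring %u, slot %u\n", ring, slot);       /* prints: ring 3, slot 42 */
        return 0;
}

The patch itself follows.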
@@ -214,7 +214,9 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
                 return -ENOMEM;
         }
         if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
-                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G\n");
+                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
+                                    "(0x%08x, len: %lu)\n",
+                       ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
                 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                                   ring->vbase, ring->dmabase);
                 return -ENOMEM;
@@ -261,13 +263,6 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
         return 0;
 }
 
-static inline int dmacontroller_rx_reset(struct bcm43xx_dmaring *ring)
-{
-        assert(!ring->tx);
-        return bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
-}
-
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                    u16 mmio_base)
@@ -308,13 +303,6 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
         return 0;
 }
 
-static inline int dmacontroller_tx_reset(struct bcm43xx_dmaring *ring)
-{
-        assert(ring->tx);
-        return bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
-}
-
 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                                struct bcm43xx_dmadesc *desc,
                                struct bcm43xx_dmadesc_meta *meta,
@@ -337,7 +325,9 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
         if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
                 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
                 dev_kfree_skb_any(skb);
-                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G\n");
+                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
+                                    "(0x%08x, len: %u)\n",
+                       dmaaddr, ring->rx_buffersize);
                 return -ENOMEM;
         }
         meta->skb = skb;
@@ -365,7 +355,7 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
 {
         int i, err = -ENOMEM;
-        struct bcm43xx_dmadesc *desc = NULL;
+        struct bcm43xx_dmadesc *desc;
         struct bcm43xx_dmadesc_meta *meta;
 
         for (i = 0; i < ring->nr_slots; i++) {
@@ -375,24 +365,20 @@ static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
                 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                 if (err)
                         goto err_unwind;
-
-                assert(ring->used_slots <= ring->nr_slots);
         }
         ring->used_slots = ring->nr_slots;
         err = 0;
 out:
         return err;
 
 err_unwind:
-        for ( ; i >= 0; i--) {
+        for (i--; i >= 0; i--) {
                 desc = ring->vbase + i;
                 meta = ring->meta + i;
 
                 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                 dev_kfree_skb(meta->skb);
         }
-        ring->used_slots = 0;
         goto out;
 }
@@ -442,13 +428,13 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
 {
         if (ring->tx) {
-                dmacontroller_tx_reset(ring);
+                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
                 /* Zero out Transmit Descriptor ring address. */
                 bcm43xx_write32(ring->bcm,
                                 ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
                                 0x00000000);
         } else {
-                dmacontroller_rx_reset(ring);
+                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
                 /* Zero out Receive Descriptor ring address. */
                 bcm43xx_write32(ring->bcm,
                                 ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
@@ -508,9 +494,7 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
         if (bcm->pci_dev->bus->number == 0)
                 ring->memoffset = 0;
 #endif
 
-        spin_lock_init(&ring->lock);
-
         ring->bcm = bcm;
         ring->nr_slots = nr_descriptor_slots;
         ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
@@ -578,22 +562,25 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
 void bcm43xx_dma_free(struct bcm43xx_private *bcm)
 {
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring1);
-        bcm->current_core->dma->rx_ring1 = NULL;
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
-        bcm->current_core->dma->rx_ring0 = NULL;
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
-        bcm->current_core->dma->tx_ring3 = NULL;
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
-        bcm->current_core->dma->tx_ring2 = NULL;
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
-        bcm->current_core->dma->tx_ring1 = NULL;
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
-        bcm->current_core->dma->tx_ring0 = NULL;
+        struct bcm43xx_dma *dma = bcm->current_core->dma;
+
+        bcm43xx_destroy_dmaring(dma->rx_ring1);
+        dma->rx_ring1 = NULL;
+        bcm43xx_destroy_dmaring(dma->rx_ring0);
+        dma->rx_ring0 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring3);
+        dma->tx_ring3 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring2);
+        dma->tx_ring2 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring1);
+        dma->tx_ring1 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring0);
+        dma->tx_ring0 = NULL;
 }
 
 int bcm43xx_dma_init(struct bcm43xx_private *bcm)
 {
+        struct bcm43xx_dma *dma = bcm->current_core->dma;
         struct bcm43xx_dmaring *ring;
         int err = -ENOMEM;
@@ -602,39 +589,39 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
                                      BCM43xx_TXRING_SLOTS, 1);
         if (!ring)
                 goto out;
-        bcm->current_core->dma->tx_ring0 = ring;
+        dma->tx_ring0 = ring;
 
         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
                                      BCM43xx_TXRING_SLOTS, 1);
         if (!ring)
                 goto err_destroy_tx0;
-        bcm->current_core->dma->tx_ring1 = ring;
+        dma->tx_ring1 = ring;
 
         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
                                      BCM43xx_TXRING_SLOTS, 1);
         if (!ring)
                 goto err_destroy_tx1;
-        bcm->current_core->dma->tx_ring2 = ring;
+        dma->tx_ring2 = ring;
 
         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
                                      BCM43xx_TXRING_SLOTS, 1);
         if (!ring)
                 goto err_destroy_tx2;
-        bcm->current_core->dma->tx_ring3 = ring;
+        dma->tx_ring3 = ring;
 
         /* setup RX DMA channels. */
         ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
                                      BCM43xx_RXRING_SLOTS, 0);
         if (!ring)
                 goto err_destroy_tx3;
-        bcm->current_core->dma->rx_ring0 = ring;
+        dma->rx_ring0 = ring;
 
         if (bcm->current_core->rev < 5) {
                 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
                                              BCM43xx_RXRING_SLOTS, 0);
                 if (!ring)
                         goto err_destroy_rx0;
-                bcm->current_core->dma->rx_ring1 = ring;
+                dma->rx_ring1 = ring;
         }
 
         dprintk(KERN_INFO PFX "DMA initialized\n");
@@ -643,27 +630,26 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
         return err;
 
 err_destroy_rx0:
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
-        bcm->current_core->dma->rx_ring0 = NULL;
+        bcm43xx_destroy_dmaring(dma->rx_ring0);
+        dma->rx_ring0 = NULL;
 err_destroy_tx3:
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
-        bcm->current_core->dma->tx_ring3 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring3);
+        dma->tx_ring3 = NULL;
 err_destroy_tx2:
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
-        bcm->current_core->dma->tx_ring2 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring2);
+        dma->tx_ring2 = NULL;
 err_destroy_tx1:
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
-        bcm->current_core->dma->tx_ring1 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring1);
+        dma->tx_ring1 = NULL;
err_destroy_tx0:
-        bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
-        bcm->current_core->dma->tx_ring0 = NULL;
+        bcm43xx_destroy_dmaring(dma->tx_ring0);
+        dma->tx_ring0 = NULL;
         goto out;
 }
 
 /* Generate a cookie for the TX header. */
-static inline
-u16 generate_cookie(struct bcm43xx_dmaring *ring,
-                    int slot)
+static u16 generate_cookie(struct bcm43xx_dmaring *ring,
+                           int slot)
 {
         u16 cookie = 0x0000;
@@ -693,24 +679,25 @@ u16 generate_cookie(struct bcm43xx_dmaring *ring,
 }
 
 /* Inspect a cookie and find out to which controller/slot it belongs. */
-static inline
+static
 struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                       u16 cookie, int *slot)
 {
+        struct bcm43xx_dma *dma = bcm->current_core->dma;
         struct bcm43xx_dmaring *ring = NULL;
 
         switch (cookie & 0xF000) {
         case 0x0000:
-                ring = bcm->current_core->dma->tx_ring0;
+                ring = dma->tx_ring0;
                 break;
         case 0x1000:
-                ring = bcm->current_core->dma->tx_ring1;
+                ring = dma->tx_ring1;
                 break;
         case 0x2000:
-                ring = bcm->current_core->dma->tx_ring2;
+                ring = dma->tx_ring2;
                 break;
         case 0x3000:
-                ring = bcm->current_core->dma->tx_ring3;
+                ring = dma->tx_ring3;
                 break;
         default:
                 assert(0);
@@ -721,8 +708,8 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
         return ring;
 }
 
-static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
+static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                          int slot)
 {
         /* Everything is ready to start. Buffers are DMA mapped and
          * associated with slots.
@@ -736,11 +723,10 @@ static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                         (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
 }
 
-static inline
-int dma_tx_fragment(struct bcm43xx_dmaring *ring,
-                    struct sk_buff *skb,
-                    struct ieee80211_txb *txb,
-                    u8 cur_frag)
+static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
+                           struct sk_buff *skb,
+                           struct ieee80211_txb *txb,
+                           u8 cur_frag)
 {
         int slot;
         struct bcm43xx_dmadesc *desc;
@@ -777,7 +763,9 @@ int dma_tx_fragment(struct bcm43xx_dmaring *ring,
 
         meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
         if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
                 return_slot(ring, slot);
-                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G\n");
+                printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
+                                    "(0x%08x, len: %u)\n",
+                       meta->dmaaddr, skb->len);
                 return -ENOMEM;
         }
@@ -797,14 +785,15 @@ int dma_tx_fragment(struct bcm43xx_dmaring *ring,
         return 0;
 }
 
-static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
+int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                    struct ieee80211_txb *txb)
 {
         /* We just received a packet from the kernel network subsystem.
          * Add headers and DMA map the memory. Poke
          * the device to send the stuff.
          * Note that this is called from atomic context.
          */
+        struct bcm43xx_dmaring *ring = bcm->current_core->dma->tx_ring1;
         u8 i;
         struct sk_buff *skb;
@@ -818,8 +807,6 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
                 return -ENOMEM;
         }
 
-        assert(irqs_disabled());
-        spin_lock(&ring->lock);
         for (i = 0; i < txb->nr_frags; i++) {
                 skb = txb->fragments[i];
                 /* We do not free the skb, as it is freed as
@@ -829,22 +816,12 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
                 dma_tx_fragment(ring, skb, txb, i);
                 //TODO: handle failure of dma_tx_fragment
         }
-        spin_unlock(&ring->lock);
 
         return 0;
 }
 
-int fastcall
-bcm43xx_dma_transfer_txb(struct bcm43xx_private *bcm,
-                         struct ieee80211_txb *txb)
-{
-        return dma_transfer_txb(bcm->current_core->dma->tx_ring1,
-                                txb);
-}
-
-void fastcall
-bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
-                              struct bcm43xx_xmitstatus *status)
+void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
+                                   struct bcm43xx_xmitstatus *status)
 {
         struct bcm43xx_dmaring *ring;
         struct bcm43xx_dmadesc *desc;
@@ -855,9 +832,6 @@ bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
         ring = parse_cookie(bcm, status->cookie, &slot);
         assert(ring);
         assert(ring->tx);
-        assert(irqs_disabled());
-        spin_lock(&ring->lock);
         assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
         while (1) {
                 assert(slot >= 0 && slot < ring->nr_slots);
@@ -877,13 +851,10 @@ bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                 slot = next_slot(ring, slot);
         }
         bcm->stats.last_tx = jiffies;
-        spin_unlock(&ring->lock);
 }
 
-static inline
-void dma_rx(struct bcm43xx_dmaring *ring,
-            int *slot)
+static void dma_rx(struct bcm43xx_dmaring *ring,
+                   int *slot)
 {
         struct bcm43xx_dmadesc *desc;
         struct bcm43xx_dmadesc_meta *meta;
@@ -928,8 +899,12 @@ void dma_rx(struct bcm43xx_dmaring *ring,
                         barrier();
                         len = le16_to_cpu(rxhdr->frame_length);
                 } while (len == 0 && i++ < 5);
-                if (len == 0)
+                if (unlikely(len == 0)) {
+                        /* recycle the descriptor buffer. */
+                        sync_descbuffer_for_device(ring, meta->dmaaddr,
+                                                   ring->rx_buffersize);
                         goto drop;
+                }
         }
         if (unlikely(len > ring->rx_buffersize)) {
                 /* The data did not fit into one descriptor buffer
@@ -937,15 +912,24 @@ void dma_rx(struct bcm43xx_dmaring *ring,
                  * This should never happen, as we try to allocate buffers
                  * big enough. So simply ignore this packet.
                  */
-                int cnt = 1;
-                s32 tmp = len - ring->rx_buffersize;
-                for ( ; tmp > 0; tmp -= ring->rx_buffersize) {
+                int cnt = 0;
+                s32 tmp = len;
+
+                while (1) {
+                        desc = ring->vbase + *slot;
+                        meta = ring->meta + *slot;
+                        /* recycle the descriptor buffer. */
+                        sync_descbuffer_for_device(ring, meta->dmaaddr,
+                                                   ring->rx_buffersize);
                         *slot = next_slot(ring, *slot);
                         cnt++;
+                        tmp -= ring->rx_buffersize;
+                        if (tmp <= 0)
+                                break;
                 }
-                printkl(KERN_ERR PFX "DMA RX buffer too small. %d dropped.\n",
-                        cnt);
+                printkl(KERN_ERR PFX "DMA RX buffer too small "
+                                     "(len: %u, buffer: %u, nr-dropped: %d)\n",
+                        len, ring->rx_buffersize, cnt);
                 goto drop;
         }
         len -= IEEE80211_FCS_LEN;
@@ -954,6 +938,8 @@ void dma_rx(struct bcm43xx_dmaring *ring,
         err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
         if (unlikely(err)) {
                 dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
+                sync_descbuffer_for_device(ring, dmaaddr,
+                                           ring->rx_buffersize);
                 goto drop;
         }
@@ -971,8 +957,7 @@ void dma_rx(struct bcm43xx_dmaring *ring,
         return;
 }
 
-void fastcall
-bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
+void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 {
         u32 status;
         u16 descptr;
@@ -982,9 +967,6 @@ bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 #endif
 
         assert(!ring->tx);
-        assert(irqs_disabled());
-        spin_lock(&ring->lock);
         status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
         descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
         current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
@@ -1002,8 +984,6 @@ bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
                         ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
                         (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
         ring->current_slot = slot;
-        spin_unlock(&ring->lock);
 }
 
 /* vim: set ts=8 sw=8 sts=8: */
@@ -122,7 +122,6 @@ struct bcm43xx_dmadesc_meta {
 };
 
 struct bcm43xx_dmaring {
-        spinlock_t lock;
         struct bcm43xx_private *bcm;
         /* Kernel virtual base address of the ring memory. */
         struct bcm43xx_dmadesc *vbase;
@@ -166,11 +165,11 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                    u16 dmacontroller_mmio_base);
 
-int FASTCALL(bcm43xx_dma_transfer_txb(struct bcm43xx_private *bcm,
-                                      struct ieee80211_txb *txb));
-void FASTCALL(bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
-                                            struct bcm43xx_xmitstatus *status));
+void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
+                                   struct bcm43xx_xmitstatus *status);
 
-void FASTCALL(bcm43xx_dma_rx(struct bcm43xx_dmaring *ring));
+int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
+                   struct ieee80211_txb *txb);
+void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
 
 #endif /* BCM43xx_DMA_H_ */
@@ -4097,7 +4097,6 @@ int fastcall bcm43xx_rx(struct bcm43xx_private *bcm,
         }
 
         frame_ctl = le16_to_cpu(wlhdr->frame_ctl);
         if ((frame_ctl & IEEE80211_FCTL_PROTECTED) && !bcm->ieee->host_decrypt) {
-
                 frame_ctl &= ~IEEE80211_FCTL_PROTECTED;
                 wlhdr->frame_ctl = cpu_to_le16(frame_ctl);
@@ -4113,12 +4112,12 @@ int fastcall bcm43xx_rx(struct bcm43xx_private *bcm,
                         skb_trim(skb, skb->len - 4);
                         stats.len -= 8;
                 }
-                /* do _not_ use wlhdr again without reassigning it */
+                wlhdr = (struct ieee80211_hdr_4addr *)(skb->data);
         }
 
         switch (WLAN_FC_GET_TYPE(frame_ctl)) {
         case IEEE80211_FTYPE_MGMT:
-                ieee80211_rx_mgt(bcm->ieee, skb->data, &stats);
+                ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats);
                 break;
         case IEEE80211_FTYPE_DATA:
                 if (is_packet_for_us)
@@ -4143,7 +4142,7 @@ static inline int bcm43xx_tx(struct bcm43xx_private *bcm,
         if (bcm->pio_mode)
                 err = bcm43xx_pio_transfer_txb(bcm, txb);
         else
-                err = bcm43xx_dma_transfer_txb(bcm, txb);
+                err = bcm43xx_dma_tx(bcm, txb);
 
         return err;
 }
@@ -1161,7 +1161,7 @@ void bcm43xx_phy_lo_b_measure(struct bcm43xx_private *bcm)
         phy->minlowsigpos[1] += 0x0101;
 
         bcm43xx_phy_write(bcm, 0x002F, phy->minlowsigpos[1]);
-        if (radio->version == 2053) {
+        if (radio->version == 0x2053) {
                 bcm43xx_phy_write(bcm, 0x000A, regstack[2]);
                 bcm43xx_phy_write(bcm, 0x002A, regstack[3]);
                 bcm43xx_phy_write(bcm, 0x0035, regstack[4]);
@@ -467,8 +467,8 @@ static void bcm43xx_calc_nrssi_offset(struct bcm43xx_private *bcm)
                 bcm43xx_phy_write(bcm, 0x0003,
                                   (bcm43xx_phy_read(bcm, 0x0003) & 0xFF9F)
                                   | 0x0040);
-                bcm43xx_phy_write(bcm, 0x007A,
-                                  bcm43xx_phy_read(bcm, 0x007A) | 0x000F);
+                bcm43xx_radio_write16(bcm, 0x007A,
+                                      bcm43xx_radio_read16(bcm, 0x007A) | 0x000F);
                 bcm43xx_set_all_gains(bcm, 3, 0, 1);
                 bcm43xx_radio_write16(bcm, 0x0043,
                                       (bcm43xx_radio_read16(bcm, 0x0043)
@@ -761,8 +761,8 @@ void bcm43xx_calc_nrssi_slope(struct bcm43xx_private *bcm)
                 bcm43xx_phy_write(bcm, 0x0802,
                                   bcm43xx_phy_read(bcm, 0x0802) | (0x0001 | 0x0002));
                 bcm43xx_set_original_gains(bcm);
-                bcm43xx_phy_write(bcm, 0x0802,
-                                  bcm43xx_phy_read(bcm, 0x0802) | 0x8000);
+                bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
+                                  bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) | 0x8000);
                 if (phy->rev >= 3) {
                         bcm43xx_phy_write(bcm, 0x0801, backup[14]);
                         bcm43xx_phy_write(bcm, 0x0060, backup[15]);
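
A recurring pattern in the patch is the bus-address bound check (`dmaaddr + len > BCM43xx_DMA_BUSADDRMAX`), whose error messages now also report the offending address and length. As an illustrative sketch only (not part of the patch), such a check could look like the following; the 1 GB limit is an assumption taken from the ">1G" wording of the error messages, and the constant and helper names here are hypothetical.

/* Illustrative sketch (not from the patch): the kind of bus-address bound
 * check the error paths above perform before handing a mapping to the
 * hardware.  EXAMPLE_DMA_BUSADDRMAX stands in for the driver's real
 * BCM43xx_DMA_BUSADDRMAX constant. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_DMA_BUSADDRMAX 0x3FFFFFFFu      /* assumed 1 GB DMA window */

static bool dma_mapping_in_window(uint32_t dmaaddr, uint32_t len)
{
        /* Widen to 64 bits so the sum cannot wrap before the comparison. */
        if ((uint64_t)dmaaddr + len > EXAMPLE_DMA_BUSADDRMAX) {
                fprintf(stderr, "DMA buffer out of range (0x%08" PRIx32
                        ", len: %" PRIu32 ")\n", dmaaddr, len);
                return false;
        }
        return true;
}

int main(void)
{
        printf("%d\n", dma_mapping_in_window(0x3FFFFE00u, 0x100));      /* 1: fits */
        printf("%d\n", dma_mapping_in_window(0x3FFFFF80u, 0x100));      /* 0: too high */
        return 0;
}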