Commit 9bd568a5 authored by Michael Buesch, committed by John W. Linville

b43: Enforce DMA descriptor memory constraints

Enforce all device constraints on the descriptor memory region.

There are several constraints on the descriptor memory, as documented
in the specification. The current code either does not enforce them or
enforces them incorrectly.

Those constraints are:
- The address limitations on 30/32bit engines, which also apply to
  the skbs.
- The 4k alignment requirement on 30/32bit engines.
- The 8k alignment requirement on 64bit engines.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 76aa5e70
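For illustration, the address and alignment constraints above reduce to simple bit arithmetic. A minimal standalone userspace sketch (not driver code; the function names are made up for this example):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* A 30bit engine can only address the first 1 GiB; the whole buffer,
 * not just its start address, must fit below that limit. */
static bool addr_ok_30bit(uint64_t addr, size_t size)
{
	return addr + size <= (1ULL << 30);
}

/* 4k alignment: the low 12 address bits are clear. */
static bool is_4k_aligned(uint64_t addr)
{
	return (addr & 0x0FFFULL) == 0;
}

/* 8k alignment: the low 13 address bits are clear. */
static bool is_8k_aligned(uint64_t addr)
{
	return (addr & 0x1FFFULL) == 0;
}

int main(void)
{
	printf("%d\n", addr_ok_30bit(0x3FFFF000ULL, 0x1000)); /* 1: ends exactly at 1 GiB */
	printf("%d\n", addr_ok_30bit(0x3FFFF001ULL, 0x1000)); /* 0: crosses the 30bit limit */
	printf("%d %d\n", (int)is_4k_aligned(0x3000), (int)is_8k_aligned(0x3000)); /* 1 0 */
	return 0;
}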
@@ -383,44 +383,160 @@ static inline
 	}
 }
 
+/* Check if a DMA region fits the device constraints.
+ * Returns true if the region is OK for usage with this device. */
+static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
+				      dma_addr_t addr, size_t size)
+{
+	switch (ring->type) {
+	case B43_DMA_30BIT:
+		if ((u64)addr + size > (1ULL << 30))
+			return 0;
+		break;
+	case B43_DMA_32BIT:
+		if ((u64)addr + size > (1ULL << 32))
+			return 0;
+		break;
+	case B43_DMA_64BIT:
+		/* Currently we can't have addresses beyond
+		 * 64bit in the kernel. */
+		break;
+	}
+	return 1;
+}
+
+#define is_4k_aligned(addr)	(((u64)(addr) & 0x0FFFull) == 0)
+#define is_8k_aligned(addr)	(((u64)(addr) & 0x1FFFull) == 0)
+
+static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
+				       dma_addr_t dmaaddr, size_t size)
+{
+	ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
+	free_pages((unsigned long)base, get_order(size));
+}
+
+static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
+					dma_addr_t *dmaaddr, size_t size,
+					gfp_t gfp_flags)
+{
+	void *base;
+
+	base = (void *)__get_free_pages(gfp_flags, get_order(size));
+	if (!base)
+		return NULL;
+	memset(base, 0, size);
+	*dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
+				      DMA_TO_DEVICE);
+	if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
+		free_pages((unsigned long)base, get_order(size));
+		return NULL;
+	}
+
+	return base;
+}
+
+static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
+				      dma_addr_t *dmaaddr, size_t size)
+{
+	void *base;
+
+	base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+					 GFP_KERNEL);
+	if (!base) {
+		b43err(ring->dev->wl, "Failed to allocate or map pages "
+		       "for DMA ringmemory\n");
+		return NULL;
+	}
+	if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+		/* The memory does not fit our device constraints.
+		 * Retry with GFP_DMA set to get lower memory. */
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
+						 GFP_KERNEL | GFP_DMA);
+		if (!base) {
+			b43err(ring->dev->wl, "Failed to allocate or map pages "
+			       "in the GFP_DMA region for DMA ringmemory\n");
+			return NULL;
+		}
+		if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
+			b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+			b43err(ring->dev->wl, "Failed to allocate DMA "
+			       "ringmemory that fits device constraints\n");
+			return NULL;
+		}
+	}
+
+	/* We expect the memory to be 4k aligned, at least. */
+	if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
+		b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
+		return NULL;
+	}
+
+	return base;
+}
+
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
+	unsigned int required;
+	void *base;
+	dma_addr_t dmaaddr;
 
-	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
-	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
-	 * has shown that 4K is sufficient for the latter as long as the buffer
-	 * does not cross an 8K boundary.
-	 *
-	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
-	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
-	 * which accounts for the GFP_DMA flag below.
-	 *
-	 * The flags here must match the flags in free_ringmemory below!
+	/* There are several requirements to the descriptor ring memory:
+	 * - The memory region needs to fit the address constraints for the
+	 *   device (same as for frame buffers).
+	 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
+	 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
 	 */
+
 	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
-						  B43_DMA_RINGMEMSIZE,
-						  &(ring->dmabase), flags);
-	if (!ring->descbase) {
-		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+		required = ring->nr_slots * sizeof(struct b43_dmadesc64);
+	else
+		required = ring->nr_slots * sizeof(struct b43_dmadesc32);
+	if (B43_WARN_ON(required > 0x1000))
 		return -ENOMEM;
+
+	ring->alloc_descsize = 0x1000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+	if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
+		/* We're on <=32bit DMA, or we already got 8k aligned memory.
+		 * That's all we need, so we're fine. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
+	}
+	b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
+
+	/* Ok, we failed at the 8k alignment requirement.
+	 * Try to force-align the memory region now. */
+	ring->alloc_descsize = 0x2000;
+	base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
+	if (!base)
+		return -ENOMEM;
+	ring->alloc_descbase = base;
+	ring->alloc_dmabase = dmaaddr;
+	if (is_8k_aligned(dmaaddr)) {
+		/* We're already 8k aligned. That's OK, too. */
+		ring->descbase = base;
+		ring->dmabase = dmaaddr;
+		return 0;
 	}
-	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
+	/* Force-align it to 8k */
+	ring->descbase = (void *)((u8 *)base + 0x1000);
+	ring->dmabase = dmaaddr + 0x1000;
+	B43_WARN_ON(!is_8k_aligned(ring->dmabase));
 
 	return 0;
 }
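The force-align fallback above works because page allocations are always at least 4k aligned: a 4k-aligned address is either already 8k aligned or sits exactly 0x1000 bytes past an 8k boundary, so an over-allocation of 0x2000 bytes always contains an 8k-aligned 0x1000-byte window at offset 0 or 0x1000. A small standalone sketch of that arithmetic (illustrative, not driver code):

#include <stdint.h>
#include <assert.h>

/* Given the 4k-aligned DMA address of a 0x2000-byte region, return the
 * offset (0 or 0x1000) at which an 8k-aligned 0x1000-byte window starts. */
static uint64_t align8k_offset(uint64_t dmaaddr)
{
	assert((dmaaddr & 0x0FFFULL) == 0);	/* caller guarantees 4k alignment */
	if ((dmaaddr & 0x1FFFULL) == 0)
		return 0;		/* already 8k aligned */
	return 0x1000;			/* 4k past a boundary: step to the next one */
}

int main(void)
{
	assert(align8k_offset(0x4000) == 0);	  /* 0x4000 is 8k aligned */
	assert(align8k_offset(0x5000) == 0x1000); /* 0x5000 + 0x1000 = 0x6000, 8k aligned */
	return 0;
}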
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-	gfp_t flags = GFP_KERNEL;
-
-	if (ring->type == B43_DMA_64BIT)
-		flags |= GFP_DMA;
-
-	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
-				ring->descbase, ring->dmabase, flags);
+	b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
+				   ring->alloc_dmabase, ring->alloc_descsize);
 }
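Note that free_ringmemory now releases the original allocation (alloc_descbase, alloc_dmabase, alloc_descsize), not the possibly offset descbase/dmabase view: unmapping or freeing the adjusted pointer would hand the allocator an address it never returned. A tiny userspace analogy of that invariant (names here are hypothetical):

#include <stdlib.h>

/* Keep the allocator's original pointer next to the aligned view and
 * always release the original (the driver's alloc_descbase vs. descbase). */
struct ringmem {
	void *alloc_base;	/* what the allocator actually returned */
	void *base;		/* aligned view, possibly offset into alloc_base */
};

static void ringmem_free(struct ringmem *rm)
{
	free(rm->alloc_base);	/* never free(rm->base): it may be offset */
}

int main(void)
{
	struct ringmem rm;

	rm.alloc_base = malloc(0x2000);
	rm.base = (char *)rm.alloc_base + 0x1000;	/* pretend we force-aligned */
	ringmem_free(&rm);
	return 0;
}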
 /* Reset the RX DMA channel */
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
 	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
 		return 1;
 
-	switch (ring->type) {
-	case B43_DMA_30BIT:
-		if ((u64)addr + buffersize > (1ULL << 30))
-			goto address_error;
-		break;
-	case B43_DMA_32BIT:
-		if ((u64)addr + buffersize > (1ULL << 32))
-			goto address_error;
-		break;
-	case B43_DMA_64BIT:
-		/* Currently we can't have addresses beyond
-		 * 64bit in the kernel. */
-		break;
+	if (!b43_dma_address_ok(ring, addr, buffersize)) {
+		/* We can't support this address. Unmap it again. */
+		unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+		return 1;
 	}
 
 	/* The address is OK. */
 	return 0;
-
-address_error:
-	/* We can't support this address. Unmap it again. */
-	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-	return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	meta->dmaaddr = dmaaddr;
 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
 				   ring->rx_buffersize, 0, 0, 0);
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -1246,6 +1350,9 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	}
 
 	/* Now transfer the whole frame. */
 	wmb();
+	ssb_dma_sync_single_for_device(ring->dev->dev,
+				       ring->alloc_dmabase,
+				       ring->alloc_descsize, DMA_TO_DEVICE);
 	ops->poke_tx(ring, next_slot(ring, slot));
 
 	return 0;
...
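The two ssb_dma_sync_single_for_device() calls added above follow from a second change in this patch: the descriptor ring is now mapped with ssb_dma_map_single() (a streaming DMA_TO_DEVICE mapping) instead of ssb_dma_alloc_consistent(), so ownership must be handed back to the device each time the CPU rewrites a descriptor, before the hardware is poked. A schematic of that streaming pattern using the generic kernel DMA API (a sketch only; the driver itself goes through the ssb_ wrappers, and dev, ring_cpu and ring_size stand in for driver state):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Schematic of the streaming-DMA descriptor pattern this patch adopts. */
static int ring_example(struct device *dev, void *ring_cpu, size_t ring_size)
{
	dma_addr_t ring_dma;

	/* Map once, CPU-to-device direction: the CPU writes descriptors,
	 * the hardware only reads them. */
	ring_dma = dma_map_single(dev, ring_cpu, ring_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ring_dma))
		return -EIO;

	/* ... CPU fills in a descriptor in ring_cpu ... */

	/* Hand ownership to the device before starting the transfer, so any
	 * CPU-side caches or bounce buffers are flushed to memory. */
	dma_sync_single_for_device(dev, ring_dma, ring_size, DMA_TO_DEVICE);
	/* ... write the hardware register that kicks off the DMA ... */

	dma_unmap_single(dev, ring_dma, ring_size, DMA_TO_DEVICE);
	return 0;
}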
@@ -157,7 +157,6 @@ struct b43_dmadesc_generic {
 } __attribute__ ((__packed__));
 
 /* Misc DMA constants */
-#define B43_DMA_RINGMEMSIZE	PAGE_SIZE
 #define B43_DMA0_RX_FRAMEOFFSET	30
 
 /* DMA engine tuning knobs */
@@ -243,6 +242,12 @@ struct b43_dmaring {
 	/* The QOS priority assigned to this ring. Only used for TX rings.
 	 * This is the mac80211 "queue" value. */
 	u8 queue_prio;
+	/* Pointers and size of the originally allocated and mapped memory
+	 * region for the descriptor ring. */
+	void *alloc_descbase;
+	dma_addr_t alloc_dmabase;
+	unsigned int alloc_descsize;
+	/* Pointer to our wireless device. */
 	struct b43_wldev *dev;
 #ifdef CONFIG_B43_DEBUG
 	/* Maximum number of used slots. */
...