Commit cc861f74 authored by Luis R. Rodriguez, committed by John W. Linville

ath: move the rx bufsize to common to share with ath5k/ath9k

This will also be used by ath9k_htc.
Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 0a45da76
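
The gist of the change: instead of each driver keeping a private copy of the RX buffer size (sc->rxbufsize in ath5k, sc->rx.bufsize in ath9k), the value now lives in the shared struct ath_common, which every ath driver reaches through its hardware-specific accessor (ath5k_hw_common()/ath9k_hw_common() in the diff below). A minimal sketch of the resulting pattern, loosely following what ath5k_rx_start() does in this patch; the helper name ath5k_rx_bufsize_init() is hypothetical and used only for illustration:

	/* Illustrative sketch, not part of the patch: compute the shared RX
	 * buffer size once and let every code path read it from ath_common. */
	static void ath5k_rx_bufsize_init(struct ath5k_softc *sc)
	{
		struct ath5k_hw *ah = sc->ah;
		struct ath_common *common = ath5k_hw_common(ah);

		/* Round the maximum 802.11 frame length up to a whole number
		 * of cache lines, as ath5k_rx_start() does below. */
		common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
	}

With the size stored in ath_common, ath9k (and later ath9k_htc) can read the same field instead of duplicating per-driver bookkeeping.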
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -87,6 +87,8 @@ struct ath_common {
 	u8 tx_chainmask;
 	u8 rx_chainmask;
 
+	u32 rx_bufsize;
+
 	struct ath_regulatory regulatory;
 	const struct ath_ops *ops;
 	const struct ath_bus_ops *bus_ops;
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -323,10 +323,13 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
 static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
 				struct ath5k_buf *bf)
 {
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
+
 	BUG_ON(!bf);
 	if (!bf->skb)
 		return;
-	pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+	pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
 			PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
@@ -1181,17 +1184,18 @@ struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
 	 * fake physical layer header at the start.
 	 */
 	skb = ath_rxbuf_alloc(common,
-			      sc->rxbufsize + common->cachelsz - 1,
+			      common->rx_bufsize + common->cachelsz - 1,
 			      GFP_ATOMIC);
 
 	if (!skb) {
 		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
-				sc->rxbufsize + common->cachelsz - 1);
+				common->rx_bufsize + common->cachelsz - 1);
 		return NULL;
 	}
 
 	*skb_addr = pci_map_single(sc->pdev,
-				   skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
+				   skb->data, common->rx_bufsize,
+				   PCI_DMA_FROMDEVICE);
 	if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
 		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
 		dev_kfree_skb(skb);
@@ -1631,10 +1635,10 @@ ath5k_rx_start(struct ath5k_softc *sc)
 	struct ath5k_buf *bf;
 	int ret;
 
-	sc->rxbufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
-	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, sc->rxbufsize);
+	common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+		  common->cachelsz, common->rx_bufsize);
 
 	spin_lock_bh(&sc->rxbuflock);
 	sc->rxlink = NULL;
@@ -1769,6 +1773,8 @@ ath5k_tasklet_rx(unsigned long data)
 	struct sk_buff *skb, *next_skb;
 	dma_addr_t next_skb_addr;
 	struct ath5k_softc *sc = (void *)data;
+	struct ath5k_hw *ah = sc->ah;
+	struct ath_common *common = ath5k_hw_common(ah);
 	struct ath5k_buf *bf;
 	struct ath5k_desc *ds;
 	int ret;
@@ -1846,7 +1852,7 @@ ath5k_tasklet_rx(unsigned long data)
 		if (!next_skb)
 			goto next;
 
-		pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
+		pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
 				PCI_DMA_FROMDEVICE);
 		skb_put(skb, rs.rs_datalen);
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -323,7 +323,6 @@ struct ath_rx {
 	u8 defant;
 	u8 rxotherant;
 	u32 *rxlink;
-	int bufsize;
 	unsigned int rxfilter;
 	spinlock_t rxflushlock;
 	spinlock_t rxbuflock;
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -48,6 +48,7 @@ static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
 	struct sk_buff *skb;
@@ -62,11 +63,13 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
 	BUG_ON(skb == NULL);
 	ds->ds_vdata = skb->data;
 
-	/* setup rx descriptors. The rx.bufsize here tells the harware
+	/*
+	 * setup rx descriptors. The rx_bufsize here tells the hardware
 	 * how much data it can DMA to us and that we are prepared
-	 * to process */
+	 * to process
+	 */
 	ath9k_hw_setuprxdesc(ah, ds,
-			     sc->rx.bufsize,
+			     common->rx_bufsize,
 			     0);
 
 	if (sc->rx.rxlink == NULL)
@@ -344,11 +347,11 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_lock_init(&sc->rx.rxbuflock);
 
-	sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				 min(common->cachelsz, (u16)64));
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+				     min(common->cachelsz, (u16)64));
 
 	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, sc->rx.bufsize);
+		  common->cachelsz, common->rx_bufsize);
 
 	/* Initialize rx descriptors */
@@ -361,7 +364,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 	}
 
 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_KERNEL);
+		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
 		if (skb == NULL) {
 			error = -ENOMEM;
 			goto err;
@@ -369,7 +372,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 		bf->bf_mpdu = skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
-						 sc->rx.bufsize,
+						 common->rx_bufsize,
 						 DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {
@@ -393,6 +396,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 
 void ath_rx_cleanup(struct ath_softc *sc)
 {
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct sk_buff *skb;
 	struct ath_buf *bf;
@@ -400,7 +405,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
 		skb = bf->bf_mpdu;
 		if (skb) {
 			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					 sc->rx.bufsize, DMA_FROM_DEVICE);
+					 common->rx_bufsize, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 		}
 	}
@@ -780,7 +785,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		 * 2. requeueing the same buffer to h/w
 		 */
 		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-				sc->rx.bufsize,
+				common->rx_bufsize,
 				DMA_FROM_DEVICE);
 
 		hdr = (struct ieee80211_hdr *) skb->data;
@@ -797,7 +802,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			goto requeue;
 
 		/* The status portion of the descriptor could get corrupted. */
-		if (sc->rx.bufsize < rx_stats->rs_datalen)
+		if (common->rx_bufsize < rx_stats->rs_datalen)
 			goto requeue;
 
 		if (!ath_rx_prepare(common, hw, skb, rx_stats,
@@ -806,7 +811,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
-		requeue_skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_ATOMIC);
+		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
 
 		/* If there is no memory we ignore the current RX'd frame,
 		 * tell hardware it can give us a new frame using the old
@@ -817,7 +822,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
-				 sc->rx.bufsize,
+				 common->rx_bufsize,
 				 DMA_FROM_DEVICE);
 
 		skb_put(skb, rx_stats->rs_datalen);
@@ -860,7 +865,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* We will now give hardware our shiny new allocated skb */
 		bf->bf_mpdu = requeue_skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
-						 sc->rx.bufsize,
+						 common->rx_bufsize,
 						 DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {