Commit e041f65d authored by Seth Forshee's avatar Seth Forshee Committed by John W. Linville

brcmsmac: Remove internal tx queue

The brcmsmac internal tx buffering is problematic. The amount of
buffering is excessive (228 packets in addition to the 256 slots in each
DMA ring), and frames may be dropped due to a lack of flow control.

This patch reworks the transmit code path to remove the internal
buffering. Frames are immediately handed off to the DMA support rather
than passing through an intermediate queue. Non-aggregate frames are
queued immediately into the tx rings, and aggregate frames are queued
temporarily in an AMPDU session until ready for transmit.

Transmit flow control is also added to avoid dropping packets when the
tx rings are full. Conceptually this is a separate change, but it's
included in this commit because removing the tx queue without adding
flow control could cause significant problems.
Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Reviewed-by: Arend van Spriel <arend@broadcom.com>
Tested-by: Daniel Wagner <wagi@monom.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 32d0f12a
...@@ -813,133 +813,6 @@ void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session) ...@@ -813,133 +813,6 @@ void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
session->ampdu_len); session->ampdu_len);
} }
/*
 * Aggregate queued MPDUs into a single AMPDU and hand it to the tx FIFO.
 *
 * ampdu: driver AMPDU state (provides the wlc back-pointer)
 * qi:    tx queue to pull further candidate frames from
 * pdu:   in/out; on entry the first frame to aggregate, on exit the
 *        frame the caller should requeue/retry, or NULL if the frame
 *        was consumed or rejected
 * prec:  precedence level within qi->q to peek/dequeue from
 *
 * Returns 0 on success, -EBUSY when aggregation should wait (either to
 * collect more frames or because tx resources are busy), or another
 * negative error code from frame preparation.
 */
int
brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
struct sk_buff **pdu, int prec)
{
struct brcms_c_info *wlc;
struct sk_buff *p;
struct brcms_ampdu_session session;
int err = 0;
u8 tid;
uint count, fifo, seg_cnt = 0;
struct scb *scb;
struct scb_ampdu *scb_ampdu;
struct scb_ampdu_tid_ini *ini;
struct ieee80211_tx_info *tx_info;
u16 qlen;
struct wiphy *wiphy;
wlc = ampdu->wlc;
wiphy = wlc->wiphy;
p = *pdu;
/* the TID is carried in the skb priority field */
tid = (u8) (p->priority);
scb = &wlc->pri_scb;
scb_ampdu = &scb->scb_ampdu;
ini = &scb_ampdu->ini[tid];
/*
 * Let pressure continue to build: if frames are already in flight
 * and the queue is still short of a full aggregate, wait so the
 * next AMPDU can be larger.
 */
qlen = pktq_plen(&qi->q, prec);
if (ini->tx_in_transit > 0 &&
qlen < min(scb_ampdu->max_pdu, ini->ba_wsize))
/* Collect multiple MPDU's to be sent in the next AMPDU */
return -EBUSY;
/* at this point we intend to transmit an AMPDU */
brcms_c_ampdu_reset_session(&session, wlc);
while (p) {
struct ieee80211_tx_rate *txrate;
tx_info = IEEE80211_SKB_CB(p);
txrate = tx_info->status.rates;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
err = brcms_c_prep_pdu(wlc, p, &fifo);
} else {
/* non-AMPDU frame should never reach this path; drop it */
wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__);
*pdu = NULL;
err = 0;
break;
}
if (err) {
if (err == -EBUSY) {
/* tx resources busy; hand frame back for retry */
wiphy_err(wiphy, "wl%d: sendampdu: "
"prep_xdu retry\n", wlc->pub->unit);
*pdu = p;
break;
}
/* error in the packet; reject it */
wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu "
"rejected\n", wlc->pub->unit);
*pdu = NULL;
break;
}
err = brcms_c_ampdu_add_frame(&session, p);
if (err == -ENOSPC) {
/*
 * No space for this packet in the AMPDU.
 * Requeue packet and proceed.
 */
*pdu = p;
break;
} else if (err) {
/* Unexpected error; reject packet */
wiphy_err(wiphy, "wl%d: sendampdu: add_frame rejected",
wlc->pub->unit);
*pdu = NULL;
break;
}
seg_cnt += 1;
/*
 * check to see if the next pkt is
 * a candidate for aggregation
 */
p = pktq_ppeek(&qi->q, prec);
if (p) {
tx_info = IEEE80211_SKB_CB(p);
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
/*
 * check if there are enough
 * descriptors available
 */
if (*wlc->core->txavail[fifo] <= seg_cnt + 1) {
wiphy_err(wiphy, "%s: No fifo space "
"!!\n", __func__);
p = NULL;
continue;
}
/* next packet fit for aggregation so dequeue */
p = brcmu_pktq_pdeq(&qi->q, prec);
} else {
/* next frame is not AMPDU; close out this aggregate */
p = NULL;
}
}
} /* end while(p) */
count = skb_queue_len(&session.skb_list);
ini->tx_in_transit += count;
if (count) {
/* patch up first and last txh's */
brcms_c_ampdu_finalize(&session);
/* hand frames to the fifo; commit DMA on the final frame */
while ((p = skb_dequeue(&session.skb_list)) != NULL)
brcms_c_txfifo(wlc, fifo, p,
skb_queue_empty(&session.skb_list));
}
/* endif (count) */
return err;
}
static void static void
brcms_c_ampdu_rate_status(struct brcms_c_info *wlc, brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
struct ieee80211_tx_info *tx_info, struct ieee80211_tx_info *tx_info,
...@@ -1113,9 +986,16 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, ...@@ -1113,9 +986,16 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
/* either retransmit or send bar if ack not recd */ /* either retransmit or send bar if ack not recd */
if (!ack_recd) { if (!ack_recd) {
if (retry && (ini->txretry[index] < (int)retry_limit)) { if (retry && (ini->txretry[index] < (int)retry_limit)) {
int ret;
ini->txretry[index]++; ini->txretry[index]++;
ini->tx_in_transit--; ini->tx_in_transit--;
brcms_c_txq_enq(wlc, scb, p); ret = brcms_c_txfifo(wlc, queue, p);
/*
* We shouldn't be out of space in the DMA
* ring here since we're reinserting a frame
* that was just pulled out.
*/
WARN_ONCE(ret, "queue %d out of txds\n", queue);
} else { } else {
/* Retry timeout */ /* Retry timeout */
ini->tx_in_transit--; ini->tx_in_transit--;
...@@ -1142,12 +1022,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, ...@@ -1142,12 +1022,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED); p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
} }
brcms_c_send_q(wlc);
/* update rate state */ /* update rate state */
antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel); antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
brcms_c_txfifo_complete(wlc, queue);
} }
void void
...@@ -1204,7 +1081,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, ...@@ -1204,7 +1081,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
p = dma_getnexttxp(wlc->hw->di[queue], p = dma_getnexttxp(wlc->hw->di[queue],
DMA_RANGE_TRANSMITTED); DMA_RANGE_TRANSMITTED);
} }
brcms_c_txfifo_complete(wlc, queue);
} }
} }
...@@ -1243,23 +1119,6 @@ void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu) ...@@ -1243,23 +1119,6 @@ void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
} }
} }
/*
* callback function that helps flushing ampdu packets from a priority queue
*/
static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu);
struct cb_del_ampdu_pars *ampdu_pars =
(struct cb_del_ampdu_pars *)arg_a;
bool rc;
rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
rc = rc && (tx_info->rate_driver_data[0] == NULL || ampdu_pars->sta == NULL ||
tx_info->rate_driver_data[0] == ampdu_pars->sta);
rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
return rc;
}
/* /*
* callback function that helps invalidating ampdu packets in a DMA queue * callback function that helps invalidating ampdu packets in a DMA queue
*/ */
...@@ -1280,15 +1139,5 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a) ...@@ -1280,15 +1139,5 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a)
void brcms_c_ampdu_flush(struct brcms_c_info *wlc, void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
struct ieee80211_sta *sta, u16 tid) struct ieee80211_sta *sta, u16 tid)
{ {
struct brcms_txq_info *qi = wlc->pkt_queue;
struct pktq *pq = &qi->q;
int prec;
struct cb_del_ampdu_pars ampdu_pars;
ampdu_pars.sta = sta;
ampdu_pars.tid = tid;
for (prec = 0; prec < pq->num_prec; prec++)
brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
(void *)&ampdu_pars);
brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu); brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
} }
...@@ -45,9 +45,6 @@ extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session); ...@@ -45,9 +45,6 @@ extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc); extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu); extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
extern int brcms_c_sendampdu(struct ampdu_info *ampdu,
struct brcms_txq_info *qi,
struct sk_buff **aggp, int prec);
extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
struct sk_buff *p, struct tx_status *txs); struct sk_buff *p, struct tx_status *txs);
extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc); extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
......
...@@ -19,12 +19,17 @@ ...@@ -19,12 +19,17 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <brcmu_utils.h> #include <brcmu_utils.h>
#include <aiutils.h> #include <aiutils.h>
#include "types.h" #include "types.h"
#include "main.h"
#include "dma.h" #include "dma.h"
#include "soc.h" #include "soc.h"
#include "scb.h"
#include "ampdu.h"
/* /*
* dma register field offset calculation * dma register field offset calculation
...@@ -230,6 +235,9 @@ struct dma_info { ...@@ -230,6 +235,9 @@ struct dma_info {
struct bcma_device *core; struct bcma_device *core;
struct device *dmadev; struct device *dmadev;
/* session information for AMPDU */
struct brcms_ampdu_session ampdu_session;
bool dma64; /* this dma engine is operating in 64-bit mode */ bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
...@@ -564,12 +572,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction) ...@@ -564,12 +572,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
return dma64_alloc(di, direction); return dma64_alloc(di, direction);
} }
struct dma_pub *dma_attach(char *name, struct si_pub *sih, struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
struct bcma_device *core,
uint txregbase, uint rxregbase, uint ntxd, uint nrxd, uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
uint rxbufsize, int rxextheadroom, uint rxbufsize, int rxextheadroom,
uint nrxpost, uint rxoffset, uint *msg_level) uint nrxpost, uint rxoffset, uint *msg_level)
{ {
struct si_pub *sih = wlc->hw->sih;
struct bcma_device *core = wlc->hw->d11core;
struct dma_info *di; struct dma_info *di;
u8 rev = core->id.rev; u8 rev = core->id.rev;
uint size; uint size;
...@@ -714,6 +723,9 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, ...@@ -714,6 +723,9 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
} }
} }
/* Initialize AMPDU session */
brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);
DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n", DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
di->ddoffsetlow, di->ddoffsethigh, di->ddoffsetlow, di->ddoffsethigh,
di->dataoffsetlow, di->dataoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
...@@ -1016,6 +1028,17 @@ static bool dma64_rxidle(struct dma_info *di) ...@@ -1016,6 +1028,17 @@ static bool dma64_rxidle(struct dma_info *di)
D64_RS0_CD_MASK)); D64_RS0_CD_MASK));
} }
static bool dma64_txidle(struct dma_info *di)
{
if (di->ntxd == 0)
return true;
return ((bcma_read32(di->core,
DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
(bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
D64_XS0_CD_MASK));
}
/* /*
* post receive buffers * post receive buffers
* return false is refill failed completely and ring is empty this will stall * return false is refill failed completely and ring is empty this will stall
...@@ -1264,50 +1287,25 @@ bool dma_rxreset(struct dma_pub *pub) ...@@ -1264,50 +1287,25 @@ bool dma_rxreset(struct dma_pub *pub)
return status == D64_RS0_RS_DISABLED; return status == D64_RS0_RS_DISABLED;
} }
/* Update count of available tx descriptors based on current DMA state */ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
static void dma_update_txavail(struct dma_info *di)
{ {
/*
* Available space is number of descriptors less the number of
* active descriptors and the number of queued AMPDU frames.
*/
di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1;
}
/*
* !! tx entry routine
* WARNING: call must check the return value for error.
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
struct dma_info *di = (struct dma_info *)pub;
unsigned char *data; unsigned char *data;
uint len; uint len;
u16 txout; u16 txout;
u32 flags = 0; u32 flags = 0;
dma_addr_t pa; dma_addr_t pa;
DMA_TRACE("%s:\n", di->name);
txout = di->txout; txout = di->txout;
if (WARN_ON(nexttxd(di, txout) == di->txin))
return;
/* /*
* obtain and initialize transmit descriptor entry. * obtain and initialize transmit descriptor entry.
*/ */
data = p->data; data = p->data;
len = p->len; len = p->len;
/* no use to transmit a zero length packet */
if (len == 0)
return 0;
/* return nonzero if out of tx descriptors */
if (nexttxd(di, txout) == di->txin)
goto outoftxd;
/* get physical address of buffer start */ /* get physical address of buffer start */
pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
...@@ -1329,15 +1327,106 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) ...@@ -1329,15 +1327,106 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
/* bump the tx descriptor index */ /* bump the tx descriptor index */
di->txout = txout; di->txout = txout;
}
/* kick the chip */ static void ampdu_finalize(struct dma_info *di)
if (commit) {
bcma_write32(di->core, DMA64TXREGOFFS(di, ptr), struct brcms_ampdu_session *session = &di->ampdu_session;
di->xmtptrbase + I2B(txout, struct dma64desc)); struct sk_buff *p;
if (WARN_ON(skb_queue_empty(&session->skb_list)))
return;
brcms_c_ampdu_finalize(session);
while (!skb_queue_empty(&session->skb_list)) {
p = skb_dequeue(&session->skb_list);
dma_txenq(di, p);
}
bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(di->txout, struct dma64desc));
brcms_c_ampdu_reset_session(session, session->wlc);
}
/*
 * Queue an AMPDU frame into the DMA engine's in-progress AMPDU session.
 * If the current session is full, it is finalized (pushed to the ring)
 * and the frame is retried against the fresh session.
 */
static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
{
struct brcms_ampdu_session *session = &di->ampdu_session;
int ret;
ret = brcms_c_ampdu_add_frame(session, p);
if (ret == -ENOSPC) {
/*
 * AMPDU cannot accommodate this frame. Close out the in-
 * progress AMPDU session and start a new one.
 */
ampdu_finalize(di);
ret = brcms_c_ampdu_add_frame(session, p);
}
/* adding to an empty session is expected to always succeed */
WARN_ON(ret);
}
/* Update count of available tx descriptors based on current DMA state */
static void dma_update_txavail(struct dma_info *di)
{
/*
* Available space is number of descriptors less the number of
* active descriptors and the number of queued AMPDU frames.
*/
di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
skb_queue_len(&di->ampdu_session.skb_list) - 1;
}
/*
* !! tx entry routine
* WARNING: call must check the return value for error.
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
struct sk_buff *p)
{
struct dma_info *di = (struct dma_info *)pub;
struct brcms_ampdu_session *session = &di->ampdu_session;
struct ieee80211_tx_info *tx_info;
bool is_ampdu;
DMA_TRACE("%s:\n", di->name);
/* no use to transmit a zero length packet */
if (p->len == 0)
return 0;
/* return nonzero if out of tx descriptors */
if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
goto outoftxd;
tx_info = IEEE80211_SKB_CB(p);
is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
if (is_ampdu)
prep_ampdu_frame(di, p);
else
dma_txenq(di, p);
/* tx flow control */ /* tx flow control */
dma_update_txavail(di); dma_update_txavail(di);
/* kick the chip */
if (is_ampdu) {
/*
* Start sending data if we've got a full AMPDU, there's
* no more space in the DMA ring, or the ring isn't
* currently transmitting.
*/
if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
di->dma.txavail == 0 || dma64_txidle(di))
ampdu_finalize(di);
} else {
bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(di->txout, struct dma64desc));
}
return 0; return 0;
outoftxd: outoftxd:
...@@ -1345,7 +1434,35 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) ...@@ -1345,7 +1434,35 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
brcmu_pkt_buf_free_skb(p); brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0; di->dma.txavail = 0;
di->dma.txnobuf++; di->dma.txnobuf++;
return -1; return -ENOSPC;
}
/*
 * Push any frames held in the in-progress AMPDU session out to the
 * DMA ring; no-op when the session is empty.
 */
void dma_txflush(struct dma_pub *pub)
{
	struct dma_info *di = (struct dma_info *)pub;

	if (skb_queue_empty(&di->ampdu_session.skb_list))
		return;

	ampdu_finalize(di);
}
/*
 * Return the number of tx descriptors currently in use, i.e. those
 * between txin and txout that have not yet been reclaimed.
 */
int dma_txpending(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
return ntxdactive(di, di->txin, di->txout);
}
/*
 * If we have an active AMPDU session and are not transmitting,
 * this function will force tx to start.
 */
void dma_kick_tx(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
struct brcms_ampdu_session *session = &di->ampdu_session;
/* only finalize when frames are queued and the engine is idle */
if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
ampdu_finalize(di);
} }
/* /*
......
...@@ -74,8 +74,7 @@ struct dma_pub { ...@@ -74,8 +74,7 @@ struct dma_pub {
uint txnobuf; /* tx out of dma descriptors */ uint txnobuf; /* tx out of dma descriptors */
}; };
extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
struct bcma_device *d11core,
uint txregbase, uint rxregbase, uint txregbase, uint rxregbase,
uint ntxd, uint nrxd, uint ntxd, uint nrxd,
uint rxbufsize, int rxextheadroom, uint rxbufsize, int rxextheadroom,
...@@ -87,7 +86,11 @@ bool dma_rxfill(struct dma_pub *pub); ...@@ -87,7 +86,11 @@ bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub); bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub); bool dma_txreset(struct dma_pub *pub);
void dma_txinit(struct dma_pub *pub); void dma_txinit(struct dma_pub *pub);
int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit); int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
struct sk_buff *p0);
void dma_txflush(struct dma_pub *pub);
int dma_txpending(struct dma_pub *pub);
void dma_kick_tx(struct dma_pub *pub);
void dma_txsuspend(struct dma_pub *pub); void dma_txsuspend(struct dma_pub *pub);
bool dma_txsuspended(struct dma_pub *pub); bool dma_txsuspended(struct dma_pub *pub);
void dma_txresume(struct dma_pub *pub); void dma_txresume(struct dma_pub *pub);
......
...@@ -239,7 +239,6 @@ struct brcms_core { ...@@ -239,7 +239,6 @@ struct brcms_core {
/* fifo */ /* fifo */
uint *txavail[NFIFO]; /* # tx descriptors available */ uint *txavail[NFIFO]; /* # tx descriptors available */
s16 txpktpend[NFIFO]; /* tx admission control */
struct macstat *macstat_snapshot; /* mac hw prev read values */ struct macstat *macstat_snapshot; /* mac hw prev read values */
}; };
...@@ -379,19 +378,6 @@ struct brcms_hardware { ...@@ -379,19 +378,6 @@ struct brcms_hardware {
*/ */
}; };
/* TX Queue information
*
* Each flow of traffic out of the device has a TX Queue with independent
* flow control. Several interfaces may be associated with a single TX Queue
* if they belong to the same flow of traffic from the device. For multi-channel
* operation there are independent TX Queues for each channel.
*/
struct brcms_txq_info {
struct brcms_txq_info *next;
struct pktq q;
uint stopped; /* tx flow control bits */
};
/* /*
* Principal common driver data structure. * Principal common driver data structure.
* *
...@@ -432,11 +418,8 @@ struct brcms_txq_info { ...@@ -432,11 +418,8 @@ struct brcms_txq_info {
* WDlast: last time wlc_watchdog() was called. * WDlast: last time wlc_watchdog() was called.
* edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac. * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
* wme_retries: per-AC retry limits. * wme_retries: per-AC retry limits.
* tx_prec_map: Precedence map based on HW FIFO space.
* fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
* bsscfg: set of BSS configurations, idx 0 is default and always valid. * bsscfg: set of BSS configurations, idx 0 is default and always valid.
* cfg: the primary bsscfg (can be AP or STA). * cfg: the primary bsscfg (can be AP or STA).
* tx_queues: common TX Queue list.
* modulecb: * modulecb:
* mimoft: SIGN or 11N. * mimoft: SIGN or 11N.
* cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode. * cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode.
...@@ -466,7 +449,6 @@ struct brcms_txq_info { ...@@ -466,7 +449,6 @@ struct brcms_txq_info {
* tempsense_lasttime; * tempsense_lasttime;
* tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM. * tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM.
* tx_duty_cycle_cck: maximum allowed duty cycle for CCK. * tx_duty_cycle_cck: maximum allowed duty cycle for CCK.
* pkt_queue: txq for transmit packets.
* wiphy: * wiphy:
* pri_scb: primary Station Control Block * pri_scb: primary Station Control Block
*/ */
...@@ -530,13 +512,9 @@ struct brcms_c_info { ...@@ -530,13 +512,9 @@ struct brcms_c_info {
u16 edcf_txop[IEEE80211_NUM_ACS]; u16 edcf_txop[IEEE80211_NUM_ACS];
u16 wme_retries[IEEE80211_NUM_ACS]; u16 wme_retries[IEEE80211_NUM_ACS];
u16 tx_prec_map;
struct brcms_bss_cfg *bsscfg; struct brcms_bss_cfg *bsscfg;
/* tx queue */
struct brcms_txq_info *tx_queues;
struct modulecb *modulecb; struct modulecb *modulecb;
u8 mimoft; u8 mimoft;
...@@ -581,7 +559,6 @@ struct brcms_c_info { ...@@ -581,7 +559,6 @@ struct brcms_c_info {
u16 tx_duty_cycle_ofdm; u16 tx_duty_cycle_ofdm;
u16 tx_duty_cycle_cck; u16 tx_duty_cycle_cck;
struct brcms_txq_info *pkt_queue;
struct wiphy *wiphy; struct wiphy *wiphy;
struct scb pri_scb; struct scb pri_scb;
}; };
...@@ -633,11 +610,8 @@ struct brcms_bss_cfg { ...@@ -633,11 +610,8 @@ struct brcms_bss_cfg {
struct brcms_bss_info *current_bss; struct brcms_bss_info *current_bss;
}; };
extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
struct sk_buff *p, bool commit); struct sk_buff *p);
extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo);
extern void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
struct sk_buff *sdu);
extern void brcms_c_print_txstatus(struct tx_status *txs); extern void brcms_c_print_txstatus(struct tx_status *txs);
extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo, extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
uint *blocks); uint *blocks);
...@@ -652,9 +626,6 @@ static inline void brcms_c_print_txdesc(struct d11txh *txh) ...@@ -652,9 +626,6 @@ static inline void brcms_c_print_txdesc(struct d11txh *txh)
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config); extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags); extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
extern void brcms_c_send_q(struct brcms_c_info *wlc);
extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
uint *fifo);
extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
uint mac_len); uint mac_len);
extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
......
...@@ -200,19 +200,6 @@ enum wlc_par_id { ...@@ -200,19 +200,6 @@ enum wlc_par_id {
/* WL11N Support */ /* WL11N Support */
#define AMPDU_AGG_HOST 1 #define AMPDU_AGG_HOST 1
#define BRCMS_PREC_COUNT 4 /* Max precedence level implemented */
/* Mask to describe all precedence levels */
#define BRCMS_PREC_BMP_ALL MAXBITVAL(BRCMS_PREC_COUNT)
/*
* This maps priority to one precedence higher - Used by PS-Poll response
* packets to simulate enqueue-at-head operation, but still maintain the
* order on the queue
*/
#define BRCMS_PRIO_TO_HI_PREC(pri) min(BRCMS_PRIO_TO_PREC(pri) + 1,\
BRCMS_PREC_COUNT - 1)
/* network protection config */ /* network protection config */
#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */ #define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */
#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */ #define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */
......
...@@ -281,7 +281,6 @@ struct ieee80211_tx_queue_params; ...@@ -281,7 +281,6 @@ struct ieee80211_tx_queue_params;
struct brcms_info; struct brcms_info;
struct brcms_c_info; struct brcms_c_info;
struct brcms_hardware; struct brcms_hardware;
struct brcms_txq_info;
struct brcms_band; struct brcms_band;
struct dma_pub; struct dma_pub;
struct si_pub; struct si_pub;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment