Commit 6742f554 authored by Juuso Oikarinen's avatar Juuso Oikarinen Committed by Luciano Coelho

wl12xx: Change TX queue to be per AC

With the current single-queue implementation traffic prioritization is not
working correctly - when using multiple BE streams and one, say VI stream,
the VI stream will share bandwidth almost equally with the BE streams.

To fix the issue, implement per AC queues, which are emptied in priority
order to the firmware. To keep it relatively simple, maintain a global
buffer count and global queue stop/wake instead of per-AC.

With these changes, prioritization appears to work just fine.
Signed-off-by: Juuso Oikarinen <juuso.oikarinen@nokia.com>
Signed-off-by: Luciano Coelho <luciano.coelho@nokia.com>
parent 17c1755c
...@@ -225,7 +225,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, ...@@ -225,7 +225,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
char buf[20]; char buf[20];
int res; int res;
queue_len = skb_queue_len(&wl->tx_queue); queue_len = wl->tx_queue_count;
res = scnprintf(buf, sizeof(buf), "%u\n", queue_len); res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
return simple_read_from_buffer(userbuf, count, ppos, buf, res); return simple_read_from_buffer(userbuf, count, ppos, buf, res);
......
...@@ -570,7 +570,7 @@ static void wl1271_irq_work(struct work_struct *work) ...@@ -570,7 +570,7 @@ static void wl1271_irq_work(struct work_struct *work)
/* Check if any tx blocks were freed */ /* Check if any tx blocks were freed */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!skb_queue_empty(&wl->tx_queue)) { wl->tx_queue_count) {
/* /*
* In order to avoid starvation of the TX path, * In order to avoid starvation of the TX path,
* call the work function directly. * call the work function directly.
...@@ -891,6 +891,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -891,6 +891,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txinfo->control.sta; struct ieee80211_sta *sta = txinfo->control.sta;
unsigned long flags; unsigned long flags;
int q;
/* /*
* peek into the rates configured in the STA entry. * peek into the rates configured in the STA entry.
...@@ -918,10 +919,12 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -918,10 +919,12 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags); set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
} }
#endif #endif
wl->tx_queue_count++;
spin_unlock_irqrestore(&wl->wl_lock, flags); spin_unlock_irqrestore(&wl->wl_lock, flags);
/* queue the packet */ /* queue the packet */
skb_queue_tail(&wl->tx_queue, skb); q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
skb_queue_tail(&wl->tx_queue[q], skb);
/* /*
* The chip specific setup must run before the first TX packet - * The chip specific setup must run before the first TX packet -
...@@ -935,7 +938,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ...@@ -935,7 +938,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* The workqueue is slow to process the tx_queue and we need stop * The workqueue is slow to process the tx_queue and we need stop
* the queue here, otherwise the queue will get too long. * the queue here, otherwise the queue will get too long.
*/ */
if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) { if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues"); wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
spin_lock_irqsave(&wl->wl_lock, flags); spin_lock_irqsave(&wl->wl_lock, flags);
...@@ -2719,7 +2722,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void) ...@@ -2719,7 +2722,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->hw = hw; wl->hw = hw;
wl->plat_dev = plat_dev; wl->plat_dev = plat_dev;
skb_queue_head_init(&wl->tx_queue); for (i = 0; i < NUM_TX_QUEUES; i++)
skb_queue_head_init(&wl->tx_queue[i]);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
......
...@@ -125,7 +125,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, ...@@ -125,7 +125,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
/* queue (we use same identifiers for tid's and ac's */ /* queue (we use same identifiers for tid's and ac's */
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = ac; desc->tid = ac;
desc->aid = TX_HW_DEFAULT_AID; desc->aid = TX_HW_DEFAULT_AID;
desc->reserved = 0; desc->reserved = 0;
...@@ -228,7 +227,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl) ...@@ -228,7 +227,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
unsigned long flags; unsigned long flags;
if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) && if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) { wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
/* firmware buffer has space, restart queues */ /* firmware buffer has space, restart queues */
spin_lock_irqsave(&wl->wl_lock, flags); spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw); ieee80211_wake_queues(wl->hw);
...@@ -237,6 +236,43 @@ static void handle_tx_low_watermark(struct wl1271 *wl) ...@@ -237,6 +236,43 @@ static void handle_tx_low_watermark(struct wl1271 *wl)
} }
} }
/*
 * Pop the next frame scheduled for transmission, honouring AC priority:
 * voice first, then video, best-effort and finally background.
 *
 * Each per-AC queue is internally locked by the skb queue API; only the
 * shared tx_queue_count needs the wl_lock.  Returns NULL when all four
 * queues are empty.
 */
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	unsigned long flags;
	struct sk_buff *skb;

	/* First non-empty queue, in strictly descending priority order. */
	if ((skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO])) == NULL &&
	    (skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI])) == NULL &&
	    (skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE])) == NULL)
		skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);

	if (skb) {
		/* Account the frame against the global queued-frame count. */
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
/*
 * Return a frame to the head of its AC queue (used when the aggregation
 * or firmware buffer cannot accept it right now) and re-account it in
 * the global queued-frame count, which is guarded by wl_lock.
 */
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
{
	int ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	unsigned long flags;

	skb_queue_head(&wl->tx_queue[ac], skb);

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wl1271_tx_work_locked(struct wl1271 *wl) void wl1271_tx_work_locked(struct wl1271 *wl)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -270,7 +306,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl) ...@@ -270,7 +306,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
wl1271_acx_rate_policies(wl); wl1271_acx_rate_policies(wl);
} }
while ((skb = skb_dequeue(&wl->tx_queue))) { while ((skb = wl1271_skb_dequeue(wl))) {
if (!woken_up) { if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false); ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0) if (ret < 0)
...@@ -284,7 +320,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl) ...@@ -284,7 +320,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Aggregation buffer is full. * Aggregation buffer is full.
* Flush buffer and try again. * Flush buffer and try again.
*/ */
skb_queue_head(&wl->tx_queue, skb); wl1271_skb_queue_head(wl, skb);
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true); buf_offset, true);
sent_packets = true; sent_packets = true;
...@@ -295,7 +331,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl) ...@@ -295,7 +331,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Firmware buffer is full. * Firmware buffer is full.
* Queue back last skb, and stop aggregating. * Queue back last skb, and stop aggregating.
*/ */
skb_queue_head(&wl->tx_queue, skb); wl1271_skb_queue_head(wl, skb);
/* No work left, avoid scheduling redundant tx work */ /* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack; goto out_ack;
...@@ -440,10 +476,13 @@ void wl1271_tx_reset(struct wl1271 *wl) ...@@ -440,10 +476,13 @@ void wl1271_tx_reset(struct wl1271 *wl)
struct sk_buff *skb; struct sk_buff *skb;
/* TX failure */ /* TX failure */
while ((skb = skb_dequeue(&wl->tx_queue))) { for (i = 0; i < NUM_TX_QUEUES; i++) {
while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
ieee80211_tx_status(wl->hw, skb); ieee80211_tx_status(wl->hw, skb);
} }
}
wl->tx_queue_count = 0;
/* /*
* Make sure the driver is at a consistent state, in case this * Make sure the driver is at a consistent state, in case this
...@@ -472,8 +511,7 @@ void wl1271_tx_flush(struct wl1271 *wl) ...@@ -472,8 +511,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
mutex_lock(&wl->mutex); mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
wl->tx_frames_cnt); wl->tx_frames_cnt);
if ((wl->tx_frames_cnt == 0) && if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
skb_queue_empty(&wl->tx_queue)) {
mutex_unlock(&wl->mutex); mutex_unlock(&wl->mutex);
return; return;
} }
......
...@@ -292,7 +292,8 @@ struct wl1271 { ...@@ -292,7 +292,8 @@ struct wl1271 {
int session_counter; int session_counter;
/* Frames scheduled for transmission, not handled yet */ /* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue; struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count;
struct work_struct tx_work; struct work_struct tx_work;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment