Commit 66396114 authored by Arik Nemtsov, committed by Luciano Coelho

wlcore: add stop reason bitmap for waking/starting queues

Allow the driver to wake/stop the queues for multiple reasons. A queue
is started when no stop-reasons exist.

Convert all wake/stop queue calls to use the new API.

Before, a stopped queue was almost synonymous with a high watermark on Tx.
Remove a bit of code in wl12xx_tx_reset() that relied on that assumption.

Internal packets arriving from mac80211 are also discarded when a queue
is stopped. A notable exception to this is the watermark reason, which
is a "soft"-stop reason. We allow traffic to gradually come to a halt,
but we don't mind spurious packets here and there. This is merely a flow
regulation mechanism.

Based on a similar patch by Eliad Peller <eliad@wizery.com>.
Signed-off-by: Arik Nemtsov <arik@wizery.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
parent 32bb2c03
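
As a reading aid before the diff, here is a minimal stand-alone sketch of the stop-reason bitmap semantics described in the commit message. It is not part of the patch: the names loosely mirror the new wlcore_* API added below, but the kernel pieces (the wl->wl_lock spinlock, the per-queue reason array and the mac80211 ieee80211_stop_queue()/ieee80211_wake_queue() calls) are replaced by a single bitmask and printf so it can be compiled and run on its own.

/* sketch only: models "a queue is started when no stop-reasons exist" */
#include <stdio.h>
#include <stdbool.h>

enum queue_stop_reason {
	STOP_REASON_WATERMARK,
	STOP_REASON_FW_RESTART,
};

static unsigned long queue_stop_reasons;	/* one bit per stop reason */

static void stop_queue(enum queue_stop_reason reason)
{
	bool was_stopped = queue_stop_reasons != 0;

	queue_stop_reasons |= 1UL << reason;
	/* only the first reason actually stops the (simulated) queue */
	if (!was_stopped)
		printf("queue stopped (first reason: %d)\n", reason);
}

static void wake_queue(enum queue_stop_reason reason)
{
	queue_stop_reasons &= ~(1UL << reason);
	/* the queue is only woken once every stop reason has been cleared */
	if (!queue_stop_reasons)
		printf("queue woken (no stop reasons left)\n");
}

int main(void)
{
	stop_queue(STOP_REASON_WATERMARK);	/* stops the queue */
	stop_queue(STOP_REASON_FW_RESTART);	/* already stopped, adds a reason */
	wake_queue(STOP_REASON_WATERMARK);	/* still stopped: FW_RESTART remains */
	wake_queue(STOP_REASON_FW_RESTART);	/* last reason cleared: queue wakes */
	return 0;
}
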
@@ -865,7 +865,7 @@ static void wl1271_recovery_work(struct work_struct *work)
 	}

 	/* Prevent spurious TX during FW restart */
-	ieee80211_stop_queues(wl->hw);
+	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

 	if (wl->sched_scanning) {
 		ieee80211_sched_scan_stopped(wl->hw);
@@ -890,7 +890,7 @@ static void wl1271_recovery_work(struct work_struct *work)
 	 * Its safe to enable TX now - the queues are stopped after a request
 	 * to restart the HW.
 	 */
-	ieee80211_wake_queues(wl->hw);
+	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
 	return;
 out_unlock:
 	mutex_unlock(&wl->mutex);
@@ -1107,9 +1107,16 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)

 	spin_lock_irqsave(&wl->wl_lock, flags);

-	/* queue the packet */
+	/*
+	 * drop the packet if the link is invalid or the queue is stopped
+	 * for any reason but watermark. Watermark is a "soft"-stop so we
+	 * allow these packets through.
+	 */
 	if (hlid == WL12XX_INVALID_LINK_ID ||
-	    (wlvif && !test_bit(hlid, wlvif->links_map))) {
+	    (wlvif && !test_bit(hlid, wlvif->links_map)) ||
+	     (wlcore_is_queue_stopped(wl, q) &&
+	      !wlcore_is_queue_stopped_by_reason(wl, q,
+			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
 		ieee80211_free_txskb(hw, skb);
 		goto out;
@@ -1127,8 +1134,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	 */
 	if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
-		ieee80211_stop_queue(wl->hw, mapping);
-		set_bit(q, &wl->stopped_queues_map);
+		wlcore_stop_queue_locked(wl, q,
+					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
 	}

 	/*
@@ -1711,7 +1718,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
 	cancel_delayed_work_sync(&wl->connection_loss_work);

 	/* let's notify MAC80211 about the remaining pending TX frames */
-	wl12xx_tx_reset(wl, true);
+	wl12xx_tx_reset(wl);
 	mutex_lock(&wl->mutex);

 	wl1271_power_off(wl);
......
@@ -443,18 +443,15 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,

 void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
 {
-	unsigned long flags;
 	int i;

 	for (i = 0; i < NUM_TX_QUEUES; i++) {
-		if (test_bit(i, &wl->stopped_queues_map) &&
+		if (wlcore_is_queue_stopped_by_reason(wl, i,
+			WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
 		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
 			/* firmware buffer has space, restart queues */
-			spin_lock_irqsave(&wl->wl_lock, flags);
-			ieee80211_wake_queue(wl->hw,
-					     wl1271_tx_get_mac80211_queue(i));
-			clear_bit(i, &wl->stopped_queues_map);
-			spin_unlock_irqrestore(&wl->wl_lock, flags);
+			wlcore_wake_queue(wl, i,
+					  WLCORE_QUEUE_STOP_REASON_WATERMARK);
 		}
 	}
 }
@@ -963,7 +960,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 }

 /* caller must hold wl->mutex and TX must be stopped */
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset(struct wl1271 *wl)
 {
 	int i;
 	struct sk_buff *skb;
@@ -978,15 +975,12 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
 		wl->tx_queue_count[i] = 0;
 	}

-	wl->stopped_queues_map = 0;
-
 	/*
 	 * Make sure the driver is at a consistent state, in case this
 	 * function is called from a context other than interface removal.
 	 * This call will always wake the TX queues.
 	 */
-	if (reset_tx_queues)
-		wl1271_handle_tx_low_watermark(wl);
+	wl1271_handle_tx_low_watermark(wl);

 	for (i = 0; i < wl->num_tx_desc; i++) {
 		if (wl->tx_frames[i] == NULL)
@@ -1060,3 +1054,94 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)

 	return BIT(__ffs(rate_set));
 }
+
+void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
+			      enum wlcore_queue_stop_reason reason)
+{
+	bool stopped = !!wl->queue_stop_reasons[queue];
+
+	/* queue should not be stopped for this reason */
+	WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
+
+	if (stopped)
+		return;
+
+	ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+}
+
+void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+		       enum wlcore_queue_stop_reason reason)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	wlcore_stop_queue_locked(wl, queue, reason);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+		       enum wlcore_queue_stop_reason reason)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+
+	/* queue should not be clear for this reason */
+	WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
+
+	if (wl->queue_stop_reasons[queue])
+		goto out;
+
+	ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+
+out:
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_stop_queues(struct wl1271 *wl,
+			enum wlcore_queue_stop_reason reason)
+{
+	int i;
+
+	for (i = 0; i < NUM_TX_QUEUES; i++)
+		wlcore_stop_queue(wl, i, reason);
+}
+
+void wlcore_wake_queues(struct wl1271 *wl,
+			enum wlcore_queue_stop_reason reason)
+{
+	int i;
+
+	for (i = 0; i < NUM_TX_QUEUES; i++)
+		wlcore_wake_queue(wl, i, reason);
+}
+
+void wlcore_reset_stopped_queues(struct wl1271 *wl)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+
+	for (i = 0; i < NUM_TX_QUEUES; i++) {
+		if (!wl->queue_stop_reasons[i])
+			continue;
+
+		wl->queue_stop_reasons[i] = 0;
+		ieee80211_wake_queue(wl->hw,
+				     wl1271_tx_get_mac80211_queue(i));
+	}
+
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+				       enum wlcore_queue_stop_reason reason)
+{
+	return test_bit(reason, &wl->queue_stop_reasons[queue]);
+}
+
+bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
+{
+	return !!wl->queue_stop_reasons[queue];
+}
@@ -184,6 +184,11 @@ struct wl1271_tx_hw_res_if {
 	struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
 } __packed;

+enum wlcore_queue_stop_reason {
+	WLCORE_QUEUE_STOP_REASON_WATERMARK,
+	WLCORE_QUEUE_STOP_REASON_FW_RESTART,
+};
+
 static inline int wl1271_tx_get_queue(int queue)
 {
 	switch (queue) {
@@ -230,7 +235,7 @@ void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset(struct wl1271 *wl);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -247,6 +252,20 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
 					  unsigned int packet_length);
 void wl1271_free_tx_id(struct wl1271 *wl, int id);
+void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
+			      enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+		       enum wlcore_queue_stop_reason reason);
+void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+		       enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queues(struct wl1271 *wl,
+			enum wlcore_queue_stop_reason reason);
+void wlcore_wake_queues(struct wl1271 *wl,
+			enum wlcore_queue_stop_reason reason);
+void wlcore_reset_stopped_queues(struct wl1271 *wl);
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+				       enum wlcore_queue_stop_reason reason);
+bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);

 /* from main.c */
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
......
@@ -209,7 +209,7 @@ struct wl1271 {

 	/* Frames scheduled for transmission, not handled yet */
 	int tx_queue_count[NUM_TX_QUEUES];
-	long stopped_queues_map;
+	unsigned long queue_stop_reasons[NUM_TX_QUEUES];

 	/* Frames received, not handled yet by mac80211 */
 	struct sk_buff_head deferred_rx_queue;
......