Commit d6a3cc2e authored by Eliad Peller, committed by Luciano Coelho

wl12xx: unify STA and AP tx_queue mechanism

Make the STA role use the global wl->links[hlid].tx_queue (by
consulting its links map) instead of wl->tx_queue, and then
unify the tx and tx_reset flows for the various vifs.
Signed-off-by: Eliad Peller <eliad@wizery.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
parent 4438aca9
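
The change below makes every vif resolve a host link id (hlid) for each frame and queue the frame on that link's per-AC queue, wl->links[hlid].tx_queue[q], so the STA and AP roles share a single enqueue/dequeue path (and a single reset path). The standalone C sketch that follows only illustrates this idea: the constant names (WL12XX_MAX_LINKS, WL12XX_INVALID_LINK_ID, NUM_TX_QUEUES) mirror the driver, but the structures and helpers are simplified stand-ins, not the driver code itself.

/*
 * Illustration only: per-link TX queues shared by the STA and AP paths.
 * Constant names mirror the driver; the types are simplified stand-ins
 * (an int counter replaces struct sk_buff_head).
 */
#include <stdbool.h>
#include <stdio.h>

#define WL12XX_MAX_LINKS       12
#define WL12XX_INVALID_LINK_ID 0xff
#define NUM_TX_QUEUES          4

struct link {
	int tx_queue[NUM_TX_QUEUES];    /* per-AC queue depth */
};

struct vif {
	unsigned int links_map;         /* bitmap of hlids owned by this vif */
	unsigned char own_hlid;         /* the STA vif's own link */
	bool is_ap;
};

static struct link links[WL12XX_MAX_LINKS];

/* Unified lookup: AP frames target the peer's link, STA frames the vif's own link. */
static unsigned char tx_get_hlid(const struct vif *vif, unsigned char peer_hlid)
{
	return vif->is_ap ? peer_hlid : vif->own_hlid;
}

/* Unified enqueue: drop the frame if its link is invalid or not owned by the vif. */
static void tx_enqueue(const struct vif *vif, unsigned char peer_hlid, int q)
{
	unsigned char hlid = tx_get_hlid(vif, peer_hlid);

	if (hlid == WL12XX_INVALID_LINK_ID || hlid >= WL12XX_MAX_LINKS ||
	    !(vif->links_map & (1u << hlid))) {
		printf("DROP: hlid %d q %d\n", hlid, q);
		return;
	}
	links[hlid].tx_queue[q]++;      /* skb_queue_tail() in the driver */
}

int main(void)
{
	struct vif sta = { .links_map = 1u << 1, .own_hlid = 1, .is_ap = false };
	struct vif ap  = { .links_map = 1u << 2, .is_ap = true };

	tx_enqueue(&sta, WL12XX_INVALID_LINK_ID, 0); /* STA: queued on its own link 1 */
	tx_enqueue(&ap, 2, 1);                       /* AP: queued on the peer's link 2 */
	tx_enqueue(&ap, 5, 1);                       /* dropped: link 5 not in links_map */

	printf("link 1 q0 = %d, link 2 q1 = %d\n",
	       links[1].tx_queue[0], links[2].tx_queue[1]);
	return 0;
}
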
@@ -1474,30 +1474,25 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	unsigned long flags;
 	int q, mapping;
-	u8 hlid = 0;
+	u8 hlid;
 
 	mapping = skb_get_queue_mapping(skb);
 	q = wl1271_tx_get_queue(mapping);
 
-	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-		hlid = wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
 
 	/* queue the packet */
-	if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
-		if (!test_bit(hlid, wlvif->links_map)) {
-			wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
-				     hlid, q);
-			dev_kfree_skb(skb);
-			goto out;
-		}
-
-		wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
-		skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
-	} else {
-		skb_queue_tail(&wl->tx_queue[q], skb);
+	if (hlid == WL12XX_INVALID_LINK_ID ||
+	    !test_bit(hlid, wlvif->links_map)) {
+		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+		dev_kfree_skb(skb);
+		goto out;
 	}
 
+	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
 	wl->tx_queue_count[q]++;
@@ -2131,7 +2126,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
 	mutex_lock(&wl->mutex);
 
 	/* let's notify MAC80211 about the remaining pending TX frames */
-	wl1271_tx_reset(wl, reset_tx_queues);
+	wl12xx_tx_reset_wlvif(wl, wlvif);
+	wl12xx_tx_reset(wl, reset_tx_queues);
 	wl1271_power_off(wl);
 
 	wl->band = IEEE80211_BAND_2GHZ;
@@ -3968,7 +3964,6 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
 	return 0;
 }
 
-/* TODO: change wl1271_tx_reset(), so we can get sta as param */
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
@@ -4867,9 +4862,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
 	wl->hw = hw;
 	wl->plat_dev = plat_dev;
 
-	for (i = 0; i < NUM_TX_QUEUES; i++)
-		skb_queue_head_init(&wl->tx_queue[i]);
-
 	for (i = 0; i < NUM_TX_QUEUES; i++)
 		for (j = 0; j < WL12XX_MAX_LINKS; j++)
 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
...
@@ -179,12 +179,10 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	}
 }
 
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct ieee80211_vif *vif,
-			     struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 
 	if (wl12xx_is_dummy_packet(wl, skb))
 		return wl->system_hlid;
@@ -429,7 +427,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
 			wlvif->default_key = idx;
 		}
 	}
-	hlid = wl1271_tx_get_hlid(wl, vif, skb);
+	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 	if (hlid == WL12XX_INVALID_LINK_ID) {
 		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
 		return -EINVAL;
@@ -538,19 +536,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
 	return &queues[q];
 }
 
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+					      struct wl1271_link *lnk)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	unsigned long flags;
 	struct sk_buff_head *queue;
 
-	queue = wl1271_select_queue(wl, wl->tx_queue);
+	queue = wl1271_select_queue(wl, lnk->tx_queue);
 	if (!queue)
-		goto out;
+		return NULL;
 
 	skb = skb_dequeue(queue);
-out:
 	if (skb) {
 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
 		spin_lock_irqsave(&wl->wl_lock, flags);
@@ -561,13 +558,11 @@ static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
 	return skb;
 }
 
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl,
-					     struct wl12xx_vif *wlvif)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif)
 {
 	struct sk_buff *skb = NULL;
-	unsigned long flags;
 	int i, h, start_hlid;
-	struct sk_buff_head *queue;
 
 	/* start from the link after the last one */
 	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
@@ -580,24 +575,16 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl,
 		if (!test_bit(h, wlvif->links_map))
 			continue;
 
-		queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
-		if (!queue)
+		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+		if (!skb)
 			continue;
 
-		skb = skb_dequeue(queue);
-		if (skb)
-			break;
+		wlvif->last_tx_hlid = h;
+		break;
 	}
 
-	if (skb) {
-		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-		wlvif->last_tx_hlid = h;
-		spin_lock_irqsave(&wl->wl_lock, flags);
-		wl->tx_queue_count[q]--;
-		spin_unlock_irqrestore(&wl->wl_lock, flags);
-	} else {
+	if (!skb)
 		wlvif->last_tx_hlid = 0;
-	}
 
 	return skb;
 }
@@ -608,11 +595,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl,
 	unsigned long flags;
 	struct sk_buff *skb = NULL;
 
-	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-		skb = wl1271_ap_skb_dequeue(wl, wlvif);
-	else
-		skb = wl1271_sta_skb_dequeue(wl);
-
+	skb = wl12xx_vif_skb_dequeue(wl, wlvif);
 	if (!skb &&
 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
 		int q;
@@ -627,24 +610,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl,
 	return skb;
 }
 
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct ieee80211_vif *vif,
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 				  struct sk_buff *skb)
 {
-	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	unsigned long flags;
 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
 	if (wl12xx_is_dummy_packet(wl, skb)) {
 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
-	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
-		u8 hlid = wl1271_tx_get_hlid(wl, vif, skb);
+	} else {
+		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
 		/* make sure we dequeue the same packet next time */
 		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
 				      WL12XX_MAX_LINKS;
-	} else {
-		skb_queue_head(&wl->tx_queue[q], skb);
 	}
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
@@ -682,7 +662,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl, struct ieee80211_vif *vif)
 			 * Aggregation buffer is full.
 			 * Flush buffer and try again.
 			 */
-			wl1271_skb_queue_head(wl, vif, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb);
 			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
 				     buf_offset, true);
 			sent_packets = true;
@@ -693,7 +673,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl, struct ieee80211_vif *vif)
 			 * Firmware buffer is full.
 			 * Queue back last skb, and stop aggregating.
 			 */
-			wl1271_skb_queue_head(wl, vif, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb);
 			/* No work left, avoid scheduling redundant tx work */
 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 			goto out_ack;
@@ -907,41 +887,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 }
 
 /* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-	struct ieee80211_vif *vif = wl->vif; /* TODO: get as param */
-	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	int i;
-	struct sk_buff *skb;
-	struct ieee80211_tx_info *info;
 
 	/* TX failure */
-	if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
-		for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
 			wl1271_free_sta(wl, wlvif, i);
-			wl1271_tx_reset_link_queues(wl, i);
-			wl->links[i].allocated_pkts = 0;
-			wl->links[i].prev_freed_pkts = 0;
-		}
+		else
+			wlvif->sta.ba_rx_bitmap = 0;
 
-		wlvif->last_tx_hlid = 0;
-	} else {
-		for (i = 0; i < NUM_TX_QUEUES; i++) {
-			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
-					     skb);
-
-				if (!wl12xx_is_dummy_packet(wl, skb)) {
-					info = IEEE80211_SKB_CB(skb);
-					info->status.rates[0].idx = -1;
-					info->status.rates[0].count = 0;
-					ieee80211_tx_status_ni(wl->hw, skb);
-				}
-			}
-		}
-
-		wlvif->sta.ba_rx_bitmap = 0;
-	}
+		wl1271_tx_reset_link_queues(wl, i);
+		wl->links[i].allocated_pkts = 0;
+		wl->links[i].prev_freed_pkts = 0;
+	}
+	wlvif->last_tx_hlid = 0;
+}
+
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+	int i;
+	struct sk_buff *skb;
+	struct ieee80211_tx_info *info;
 
 	for (i = 0; i < NUM_TX_QUEUES; i++)
 		wl->tx_queue_count[i] = 0;
...
@@ -206,7 +206,8 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
 void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_work_locked(struct wl1271 *wl, struct ieee80211_vif *vif);
 void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -214,6 +215,8 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
 u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			 struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+		      struct sk_buff *skb);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
...
@@ -416,7 +416,6 @@ struct wl1271 {
 	s64 time_offset;
 
 	/* Frames scheduled for transmission, not handled yet */
-	struct sk_buff_head tx_queue[NUM_TX_QUEUES];
 	int tx_queue_count[NUM_TX_QUEUES];
 	long stopped_queues_map;
...