Commit 9794c64f authored by Liad Kaufman, committed by Luca Coelho

iwlwifi: mvm: support dqa queue inactivation upon timeout

Support marking queues as inactive upon a timeout expiring,
and allow inactive queues to be re-assigned to other RA/TIDs
if no other queue is free.

This is done by keeping a timestamp of the latest frame TXed
for every RA/TID, and then, when a new queue is needed, going
over the queues currently in use and marking as inactive any
queue whose TIDs have all been idle for longer than the timeout.
Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent ca221c9b
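
The core of the change is small: every transmitted frame stamps the queue's per-TID last_frame_time with jiffies, and iwl_mvm_inactivity_check() later marks a queue IWL_MVM_QUEUE_INACTIVE once all of its mapped TIDs have been quiet for IWL_MVM_DQA_QUEUE_TIMEOUT, so iwl_mvm_find_free_queue() can hand it to another RA/TID when nothing is truly free. The fragment below is a simplified sketch of that bookkeeping only, assuming normal kernel context (jiffies, time_after(), HZ); struct toy_txq and its helpers are illustrative names, not part of the patch.

/* Simplified sketch of the timeout bookkeeping introduced by this patch.
 * The real logic lives in iwl_mvm_tx_mpdu() and iwl_mvm_inactivity_check();
 * struct toy_txq and these helpers are illustrative only.
 */
#include <linux/jiffies.h>
#include <linux/bitops.h>

#define TOY_QUEUE_TIMEOUT (5 * HZ)  /* mirrors IWL_MVM_DQA_QUEUE_TIMEOUT */
#define TOY_MAX_TIDS 9              /* 8 data TIDs + 1 "TID" for mgmt frames */

struct toy_txq {
	unsigned long tid_bitmap;                     /* TIDs mapped to this queue */
	unsigned long last_frame_time[TOY_MAX_TIDS];  /* jiffies of last TX per TID */
	bool inactive;
};

/* On every TX: remember when this RA/TID last saw traffic */
static void toy_txq_note_tx(struct toy_txq *q, int tid)
{
	q->last_frame_time[tid] = jiffies;
	q->inactive = false;
}

/* When hunting for a queue: mark it inactive only if every TID mapped
 * to it has been idle for longer than the timeout.
 */
static void toy_txq_mark_if_idle(struct toy_txq *q)
{
	unsigned long now = jiffies;
	int tid;

	for_each_set_bit(tid, &q->tid_bitmap, TOY_MAX_TIDS)
		if (time_after(q->last_frame_time[tid] + TOY_QUEUE_TIMEOUT, now))
			return;  /* at least one TID is still fresh */

	q->inactive = true;  /* may now be re-assigned to another RA/TID */
}

In the patch itself the inactive queue is not torn down at marking time; it is only reclaimed by iwl_mvm_find_free_queue() when no genuinely free queue exists, which keeps the check a cheap optimization rather than an immediate teardown.
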
@@ -687,13 +687,22 @@ struct iwl_mvm_baid_data {
* This is the state of a queue that has been fully configured (including
* SCD pointers, etc), has a specific RA/TID assigned to it, and can be
* used to send traffic.
* @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
* This is a state of a queue that has had traffic on it, but during the
* last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
* it. In this state, when a new queue is needed to be allocated but no
* such free queue exists, an inactive queue might be freed and given to
* the new RA/TID.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_INACTIVE,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -750,11 +759,15 @@ struct iwl_mvm {
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name;
@@ -1618,7 +1631,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
*/
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
@@ -1725,6 +1738,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
...
@@ -310,6 +310,112 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
unsigned long disable_agg_tids,
bool remove_queue)
{
struct iwl_mvm_add_sta_cmd cmd = {};
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
u32 status;
u8 sta_id;
int ret;
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return -EINVAL;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_disable_agg |= disable_agg_tids;
cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
cmd.sta_id = mvmsta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
cmd.modify_mask = STA_MODIFY_QUEUES;
if (disable_agg_tids)
cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
if (remove_queue)
cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
rcu_read_unlock();
/* Notify FW of queue removal from the STA queues */
status = ADD_STA_SUCCESS;
ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
return ret;
}
/*
* Remove a queue from a station's resources.
* Note that this only marks the queue as free. It DOESN'T delete a BA agreement, and
* doesn't disable the queue
*/
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long tid_bitmap;
unsigned long disable_agg_tids = 0;
u8 sta_id;
int tid;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return 0;
}
mvmsta = iwl_mvm_sta_from_mac80211(sta);
spin_lock_bh(&mvmsta->lock);
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
disable_agg_tids |= BIT(tid);
}
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
spin_unlock_bh(&mvmsta->lock);
rcu_read_unlock();
spin_lock(&mvm->queue_info_lock);
/* Unmap MAC queues and TIDs from this queue */
mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
mvm->queue_info[queue].hw_queue_refcount = 0;
mvm->queue_info[queue].tid_bitmap = 0;
spin_unlock(&mvm->queue_info_lock);
return disable_agg_tids;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -325,6 +431,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
bool using_inactive_queue = false;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
int ssn;
int ret;
@@ -338,7 +447,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
*/
if (!ieee80211_is_data_qos(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
-queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_MGMT_QUEUE,
IWL_MVM_DQA_MAX_MGMT_QUEUE);
if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
@@ -347,15 +457,36 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
/* If no such queue is found, we'll use a DATA queue instead */
}
-if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
(mvm->queue_info[mvmsta->reserved_queue].status ==
IWL_MVM_QUEUE_RESERVED ||
mvm->queue_info[mvmsta->reserved_queue].status ==
IWL_MVM_QUEUE_INACTIVE)) {
queue = mvmsta->reserved_queue;
mvm->queue_info[queue].reserved = true;
IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
}
if (queue < 0)
-queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
/*
* Check if this queue is already allocated but inactive.
* In such a case, we'll need to first free this queue before enabling
* it again, so we'll mark it as reserved to make sure no new traffic
* arrives on it
*/
if (queue > 0 &&
mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
using_inactive_queue = true;
IWL_DEBUG_TX_QUEUES(mvm,
"Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
queue, mvmsta->sta_id, tid);
}
/*
* Mark TXQ as ready, even though it hasn't been fully configured yet,
* to make sure no one else takes it.
@@ -380,6 +511,38 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
/*
* If this queue was previously inactive (idle) - we need to free it
* first
*/
if (using_inactive_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.enable = 0,
};
disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
/* Disable the queue */
iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
true);
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
&cmd);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
queue, ret);
/* Re-mark the inactive queue as inactive */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
spin_unlock_bh(&mvm->queue_info_lock);
return ret;
}
}
IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
queue, mvmsta->sta_id, tid);
@@ -389,7 +552,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
queue_state = mvmsta->tid_data[tid].state;
if (mvmsta->reserved_queue == queue)
mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
@@ -399,7 +564,11 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (ret)
goto out_err;
-return 0;
/* If we need to re-enable aggregations... */
if (queue_state == IWL_AGG_ON)
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
return ret;
out_err:
iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
@@ -476,6 +645,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
unsigned long deferred_tid_traffic;
int sta_id, tid;
/* Check inactivity of queues */
iwl_mvm_inactivity_check(mvm);
mutex_lock(&mvm->mutex);
/* Go over all stations with deferred traffic */
@@ -505,6 +677,12 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
/*
* Check for inactive queues, so we don't reach a situation where we
* can't add a STA due to a shortage in queues that doesn't really exist
*/
iwl_mvm_inactivity_check(mvm);
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */
@@ -514,7 +692,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
else
-queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
@@ -1403,8 +1582,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
-static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -1459,6 +1638,7 @@ const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_VO,
IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};
static const u8 tid_to_ucode_ac[] = {
@@ -1513,7 +1693,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = mvmsta->tid_data[tid].txq_id;
if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
-txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
...
@@ -321,6 +321,9 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
* @is_tid_active: has this TID sent traffic in the last
* %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
* field should be ignored.
*/
struct iwl_mvm_tid_data {
struct sk_buff_head deferred_tx_frames;
@@ -333,6 +336,7 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
bool is_tid_active;
};
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
@@ -509,6 +513,9 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start);
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
...
@@ -884,9 +884,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* nullfunc frames should go to the MGMT queue regardless of QOS
*/
tid = IWL_MAX_TID_COUNT;
-txq_id = mvmsta->tid_data[tid].txq_id;
}
if (iwl_mvm_is_dqa_supported(mvm))
txq_id = mvmsta->tid_data[tid].txq_id;
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
@@ -905,9 +907,12 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->tid_data[tid].txq_id;
}
-if (iwl_mvm_is_dqa_supported(mvm)) {
-if (unlikely(mvmsta->tid_data[tid].txq_id ==
-IEEE80211_INVAL_HW_QUEUE)) {
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active) &&
iwl_mvm_is_dqa_supported(mvm)) {
/* If TXQ needs to be allocated... */
if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
@@ -917,11 +922,22 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
}
-txq_id = mvmsta->tid_data[tid].txq_id;
/* If we are here - TXQ exists and needs to be re-activated */
spin_lock(&mvm->queue_info_lock);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
txq_id);
}
/* Keep track of the time of the last frame for this RA/TID */
mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
...
@@ -579,17 +579,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
{
int i;
lockdep_assert_held(&mvm->queue_info_lock);
/* Start by looking for a free queue */
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
/*
* If no free queue found - settle for an inactive one to reconfigure
* Make sure that the inactive queue either already belongs to this STA,
* or that if it belongs to another one - it isn't the reserved queue
*/
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
(sta_id == mvm->queue_info[i].ra_sta_id ||
!mvm->queue_info[i].reserved))
return i;
return -ENOSPC;
}
@@ -650,6 +662,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
else
mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
@@ -752,6 +765,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].tid_bitmap = 0;
mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_disable(mvm->trans, queue, false);
@@ -1039,6 +1055,154 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_connection_loss(vif);
}
/*
* Remove inactive TIDs of a given queue.
* If all queue TIDs are inactive - mark the queue as inactive
* If only some of the queue TIDs are inactive - unmap them from the queue
*/
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int queue,
unsigned long tid_bitmap)
{
int tid;
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
tid_bitmap &= ~BIT(tid);
}
/* If all TIDs in the queue are inactive - mark queue as inactive. */
if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
mvmsta->tid_data[tid].is_tid_active = false;
IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
queue);
return;
}
/*
* If we are here, this is a shared queue and not all TIDs timed-out.
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
mvm->queue_info[queue].hw_queue_refcount--;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
mvmsta->tid_data[tid].is_tid_active = false;
IWL_DEBUG_TX_QUEUES(mvm,
"Removing inactive TID %d from shared Q:%d\n",
tid, queue);
}
IWL_DEBUG_TX_QUEUES(mvm,
"TXQ #%d left with tid bitmap 0x%x\n", queue,
mvm->queue_info[queue].tid_bitmap);
/*
* There may be different TIDs with the same mac queues, so make
* sure all TIDs have existing corresponding mac queues enabled
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
mvm->queue_info[queue].hw_queue_to_mac80211 |=
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
/* TODO: if queue was shared - need to re-enable AGGs */
}
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
unsigned long timeout_queues_map = 0;
unsigned long now = jiffies;
int i;
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
timeout_queues_map |= BIT(i);
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
/*
* If a queue times out - mark it as INACTIVE (don't remove it right away
* if we don't have to). This is an optimization in case traffic comes
* later, and we don't HAVE to use a currently-inactive queue
*/
for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
u8 sta_id;
int tid;
unsigned long inactive_tid_bitmap = 0;
unsigned long queue_tid_bitmap;
spin_lock_bh(&mvm->queue_info_lock);
queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
/* If TXQ isn't in active use anyway - nothing to do here... */
if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY) {
spin_unlock_bh(&mvm->queue_info_lock);
continue;
}
/* Check to see if there are inactive TIDs on this queue */
for_each_set_bit(tid, &queue_tid_bitmap,
IWL_MAX_TID_COUNT + 1) {
if (time_after(mvm->queue_info[i].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
continue;
inactive_tid_bitmap |= BIT(tid);
}
spin_unlock_bh(&mvm->queue_info_lock);
/* If all TIDs are active - finish check on this queue */
if (!inactive_tid_bitmap)
continue;
/*
* If we are here - the queue hadn't been served recently and is
* in use
*/
sta_id = mvm->queue_info[i].ra_sta_id;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/*
* If the STA doesn't exist anymore, it isn't an error. It could
* be that it was removed since getting the queues, and in this
* case it should've inactivated its queues anyway.
*/
if (IS_ERR_OR_NULL(sta))
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
spin_lock_bh(&mvmsta->lock);
spin_lock(&mvm->queue_info_lock);
iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
inactive_tid_bitmap);
spin_unlock(&mvm->queue_info_lock);
spin_unlock_bh(&mvmsta->lock);
}
rcu_read_unlock();
}
int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
enum iwl_lqm_cmd_operatrions operation,
u32 duration, u32 timeout)
...