Commit d91547c0 authored by John W. Linville
parents e248ad30 e3d4bc8c
@@ -173,6 +173,8 @@ enum {
 	REPLY_DEBUG_CMD = 0xf0,
 	DEBUG_LOG_MSG = 0xf7,
 
+	MCAST_FILTER_CMD = 0xd0,
+
 	/* D3 commands/notifications */
 	D3_CONFIG_CMD = 0xd3,
 	PROT_OFFLOAD_CONFIG_CMD = 0xd4,
@@ -948,4 +950,29 @@ struct iwl_set_calib_default_cmd {
 	u8 data[0];
 } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
 
+#define MAX_PORT_ID_NUM 2
+
+/**
+ * struct iwl_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a strange way
+ *	to identify network interface adopted in host-device IF.
+ *	It is used by FW as index in array of addresses. This array has
+ *	MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Place holder for array of MAC addresses.
+ *	IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwl_mcast_filter_cmd {
+	u8 filter_own;
+	u8 port_id;
+	u8 count;
+	u8 pass_all;
+	u8 bssid[6];
+	u8 reserved[2];
+	u8 addr_list[0];
+} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_h__ */
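To make the variable-length layout above concrete, here is a small host-side sketch (not part of this commit) of how a MCAST_FILTER_CMD payload could be assembled, padding @addr_list so the total length stays DWORD aligned as the kernel-doc requires. The build_mcast_filter_cmd() helper and the userspace types are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_PORT_ID_NUM 2
#define ETH_ALEN 6

/* Same field layout as the firmware command above (host-side copy;
 * __packed is dropped since every field is a byte). */
struct iwl_mcast_filter_cmd {
	uint8_t filter_own;
	uint8_t port_id;
	uint8_t count;
	uint8_t pass_all;
	uint8_t bssid[ETH_ALEN];
	uint8_t reserved[2];
	uint8_t addr_list[];
};

/* Hypothetical helper: allocate a command carrying 'count' multicast
 * addresses and pad the tail so the total length is a multiple of 4,
 * as the kernel-doc for @addr_list requires. */
static struct iwl_mcast_filter_cmd *
build_mcast_filter_cmd(const uint8_t (*addrs)[ETH_ALEN], uint8_t count,
		       const uint8_t bssid[ETH_ALEN], size_t *cmd_len)
{
	size_t len = sizeof(struct iwl_mcast_filter_cmd) + (size_t)count * ETH_ALEN;
	struct iwl_mcast_filter_cmd *cmd;

	len = (len + 3) & ~(size_t)3;	/* round up to a 4-byte boundary */
	cmd = calloc(1, len);
	if (!cmd)
		return NULL;

	cmd->port_id = 0;		/* must be below MAX_PORT_ID_NUM */
	cmd->count = count;
	cmd->pass_all = 0;		/* 0: only pass the listed addresses */
	memcpy(cmd->bssid, bssid, ETH_ALEN);
	memcpy(cmd->addr_list, addrs, (size_t)count * ETH_ALEN);

	*cmd_len = len;
	return cmd;
}

int main(void)
{
	const uint8_t bssid[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t addrs[1][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },	/* 224.0.0.1 group */
	};
	size_t len;
	struct iwl_mcast_filter_cmd *cmd = build_mcast_filter_cmd(addrs, 1, bssid, &len);

	if (cmd)
		printf("command length: %zu bytes (DWORD aligned)\n", len);
	free(cmd);
	return 0;
}
```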
@@ -586,10 +586,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
  */
 static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
 					  struct ieee80211_vif *vif,
-					  struct iwl_mac_data_sta *ctxt_sta)
+					  struct iwl_mac_data_sta *ctxt_sta,
+					  bool force_assoc_off)
 {
 	/* We need the dtim_period to set the MAC as associated */
-	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) {
+	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+	    !force_assoc_off) {
 		u32 dtim_offs;
 
 		/*
@@ -659,7 +661,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
 		cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
 
 	/* Fill the data specific for station mode */
-	iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
+	iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
+				      action == FW_CTXT_ACTION_ADD);
 
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
@@ -677,7 +680,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
 	/* Fill the data specific for station mode */
-	iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta);
+	iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
+				      action == FW_CTXT_ACTION_ADD);
 
 	cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
 					IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
......
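The new force_assoc_off parameter makes the station MAC context be sent to the firmware as unassociated whenever the context is being added (action == FW_CTXT_ACTION_ADD), even if mac80211 already reports an association; only a later MODIFY applies the associated state. A tiny standalone sketch of that predicate (illustrative model, not driver code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: the context is only reported as associated when
 * mac80211 says we are associated, the DTIM period is already known,
 * and the caller did not force the association off, which now happens
 * whenever the MAC context is being ADDed rather than MODIFYed. */
static bool ctxt_sta_is_assoc(bool assoc, int dtim_period, bool force_assoc_off)
{
	return assoc && dtim_period && !force_assoc_off;
}

int main(void)
{
	/* Fresh ADD of the MAC context: always pushed as unassociated. */
	printf("ADD:    assoc=%d\n", ctxt_sta_is_assoc(true, 2, true));
	/* Later MODIFY once the DTIM period is known: associated. */
	printf("MODIFY: assoc=%d\n", ctxt_sta_is_assoc(true, 2, false));
	return 0;
}
```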
@@ -701,6 +701,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
 	*total_flags = 0;
 }
 
+static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif)
+{
+	struct iwl_mcast_filter_cmd mcast_filter_cmd = {
+		.pass_all = 1,
+	};
+
+	memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+	return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
+				    sizeof(mcast_filter_cmd),
+				    &mcast_filter_cmd);
+}
+
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 					     struct ieee80211_vif *vif,
 					     struct ieee80211_bss_conf *bss_conf,
@@ -722,6 +736,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 			return;
 		}
 		iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+		iwl_mvm_configure_mcast_filter(mvm, vif);
 	} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
 		/* remove AP station now that the MAC is unassoc */
 		ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -931,7 +946,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 	switch (cmd) {
 	case STA_NOTIFY_SLEEP:
-		if (atomic_read(&mvmsta->pending_frames) > 0)
+		if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
 			ieee80211_sta_block_awake(hw, sta, true);
 		/*
 		 * The fw updates the STA to be asleep. Tx packets on the Tx
......
@@ -292,6 +292,7 @@ struct iwl_mvm {
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
 	struct work_struct sta_drained_wk;
 	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
 
 	/* configured by mac80211 */
 	u32 rts_threshold;
......
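Moving the pending-frame counter out of struct iwl_mvm_sta and into an array indexed by firmware station id lets the Tx-response path keep decrementing it even after the per-station struct has been freed. A standalone C11 sketch of that counting scheme (names are illustrative, not the driver's):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IWL_MVM_STATION_COUNT 16

/* One counter per firmware station id, owned by the shared context rather
 * than by the per-station struct, so it stays valid while a station is
 * being removed. */
static atomic_int pending_frames[IWL_MVM_STATION_COUNT];

static void frame_queued(int sta_id)
{
	atomic_fetch_add(&pending_frames[sta_id], 1);
}

/* Mirrors atomic_sub_and_test(): returns true when the counter reaches 0,
 * i.e. when the last pending frame for this station was reclaimed. */
static bool frames_reclaimed(int sta_id, int freed)
{
	return atomic_fetch_sub(&pending_frames[sta_id], freed) == freed;
}

int main(void)
{
	frame_queued(3);
	frame_queued(3);
	printf("drained after first reclaim:  %d\n", frames_reclaimed(3, 1)); /* 0 */
	printf("drained after second reclaim: %d\n", frames_reclaimed(3, 1)); /* 1 */
	return 0;
}
```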
@@ -292,6 +292,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(BT_COEX_PROT_ENV),
 	CMD(BT_PROFILE_NOTIFICATION),
 	CMD(BT_CONFIG),
+	CMD(MCAST_FILTER_CMD),
 };
 #undef CMD
......
@@ -298,6 +298,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
 	else
 		cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
 
+	/*
+	 * TODO: This is a WA due to a bug in the FW AUX framework that does not
+	 * properly handle time events that fail to be scheduled
+	 */
+	cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
+
 	cmd->repeats = cpu_to_le32(1);
 
 	/*
......
@@ -219,7 +219,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 
 	/* HW restart, don't assume the memory has been zeroed */
-	atomic_set(&mvm_sta->pending_frames, 0);
+	atomic_set(&mvm->pending_frames[sta_id], 0);
 	mvm_sta->tid_disable_agg = 0;
 	mvm_sta->tfd_queue_msk = 0;
 	for (i = 0; i < IEEE80211_NUM_ACS; i++)
@@ -406,15 +406,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 	}
 
+	/*
+	 * Make sure that the tx response code sees the station as -EBUSY and
+	 * calls the drain worker.
+	 */
+	spin_lock_bh(&mvm_sta->lock);
 	/*
 	 * There are frames pending on the AC queues for this station.
 	 * We need to wait until all the frames are drained...
 	 */
-	if (atomic_read(&mvm_sta->pending_frames)) {
-		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+	if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
 				   ERR_PTR(-EBUSY));
+		spin_unlock_bh(&mvm_sta->lock);
+		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 	} else {
+		spin_unlock_bh(&mvm_sta->lock);
 		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
 	}
......
@@ -274,7 +274,6 @@ struct iwl_mvm_tid_data {
  * @bt_reduced_txpower: is reduced tx power enabled for this station
  * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
  *	and from Tx response flow, it needs a spinlock.
- * @pending_frames: number of frames for this STA on the shared Tx queues.
  * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
@@ -290,7 +289,6 @@ struct iwl_mvm_sta {
 	u8 max_agg_bufsize;
 	bool bt_reduced_txpower;
 	spinlock_t lock;
-	atomic_t pending_frames;
 	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
 	struct iwl_lq_sta lq_sta;
 	struct ieee80211_vif *vif;
......
@@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 	spin_unlock(&mvmsta->lock);
 
-	if (mvmsta->vif->type == NL80211_IFTYPE_AP &&
-	    txq_id < IWL_MVM_FIRST_AGG_QUEUE)
-		atomic_inc(&mvmsta->pending_frames);
+	if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
 	return 0;
@@ -680,17 +679,42 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 	/*
 	 * If the txq is not an AMPDU queue, there is no chance we freed
 	 * several skbs. Check that out...
-	 * If there are no pending frames for this STA, notify mac80211 that
-	 * this station can go to sleep in its STA table.
 	 */
-	if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta &&
-	    !WARN_ON(skb_freed > 1) &&
-	    mvmsta->vif->type == NL80211_IFTYPE_AP &&
-	    atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) {
-		ieee80211_sta_block_awake(mvm->hw, sta, false);
-		set_bit(sta_id, mvm->sta_drained);
-		schedule_work(&mvm->sta_drained_wk);
+	if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+	    atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
+		if (mvmsta) {
+			/*
+			 * If there are no pending frames for this STA, notify
+			 * mac80211 that this station can go to sleep in its
+			 * STA table.
+			 */
+			if (mvmsta->vif->type == NL80211_IFTYPE_AP)
+				ieee80211_sta_block_awake(mvm->hw, sta, false);
+			/*
+			 * We might very well have taken mvmsta pointer while
+			 * the station was being removed. The remove flow might
+			 * have seen a pending_frame (because we didn't take
+			 * the lock) even if now the queues are drained. So make
+			 * really sure now that this the station is not being
+			 * removed. If it is, run the drain worker to remove it.
+			 */
+			spin_lock_bh(&mvmsta->lock);
+			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+			if (IS_ERR_OR_NULL(sta)) {
+				/*
+				 * Station disappeared in the meantime:
+				 * so we are draining.
+				 */
+				set_bit(sta_id, mvm->sta_drained);
+				schedule_work(&mvm->sta_drained_wk);
+			}
+			spin_unlock_bh(&mvmsta->lock);
+		} else if (!mvmsta) {
+			/* Tx response without STA, so we are draining */
+			set_bit(sta_id, mvm->sta_drained);
+			schedule_work(&mvm->sta_drained_wk);
+		}
 	}
 
 	rcu_read_unlock();
 }
......
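The reworked Tx-response path above first drops the shared pending-frame counter and only then decides, with the per-station lock held, whether to unblock the station (AP case) or to kick the drain worker because the removal path already marked the station with -EBUSY. A simplified standalone model of that decision, with illustrative names and without the RCU and locking details:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative station states: present, marked busy by the removal path,
 * or already gone (Tx response arrived without a station struct). */
enum sta_state { STA_PRESENT, STA_MARKED_BUSY, STA_GONE };

static void handle_tx_response(enum sta_state state, bool last_frame_freed,
			       bool is_ap_vif)
{
	if (!last_frame_freed)
		return;			/* frames still pending, nothing to do */

	if (state == STA_PRESENT) {
		/* Station still exists: on an AP interface, let it sleep. */
		if (is_ap_vif)
			printf("unblock station: it may go to sleep\n");
		return;
	}

	/*
	 * The station is being removed (the remove flow saw a non-zero
	 * pending count and left -EBUSY behind) or is already gone:
	 * schedule the drain worker to free the firmware station.
	 */
	printf("station draining: schedule drain worker\n");
}

int main(void)
{
	handle_tx_response(STA_PRESENT, true, true);	  /* AP: unblock */
	handle_tx_response(STA_MARKED_BUSY, true, false); /* removal: drain */
	return 0;
}
```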