Commit 2f33561e authored by Miri Korenblit, committed by Johannes Berg

wifi: iwlwifi: mvm: trigger link selection after exiting EMLSR

If the reason for exiting EMLSR was a blocking reason, wait for the
corresponding unblocking event and then:
- If there is an ongoing scan, do nothing; link selection will be
  triggered when it completes.
- If more than 30 seconds have passed since the exit, trigger an MLO
  scan, which will in turn trigger link selection.
- If less than 30 seconds have passed since the exit, reuse the latest
  link selection result.

If the reason for exiting EMLSR was an exit reason (IWL_MVM_EXIT_*),
schedule an MLO scan in 30 seconds.
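
In pseudo-code, the resulting flow is roughly the following (a simplified
sketch for illustration only, omitting the prevention logic; all
identifiers are the ones introduced or touched by this patch):

    /* on EMLSR exit */
    mvmvif->last_esr_exit.ts = jiffies;
    mvmvif->last_esr_exit.reason = reason;
    if (reason & IWL_MVM_BLOCK_ESR_REASONS)
        return;             /* wait for the matching unblock event */
    /* plain exit reason: retry via MLO scan in 30 seconds */
    wiphy_delayed_work_queue(mvm->hw->wiphy, &mvmvif->mlo_int_scan_wk,
                             round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));

    /* on unblock, once no blocking reason remains */
    if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO)
        return;             /* link selection runs when the scan ends */
    if (time_after(jiffies, mvmvif->last_esr_exit.ts +
                            IWL_MVM_TRIGGER_LINK_SEL_TIME))
        /* exit is too old: scan now and reselect links */
        wiphy_delayed_work_queue(mvm->hw->wiphy,
                                 &mvmvif->mlo_int_scan_wk, 0);
    else
        /* recent exit: reuse the previous selection */
        ieee80211_set_active_links_async(vif, mvmvif->link_selection_res);
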
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Reviewed-by: Ilan Peer <ilan.peer@intel.com>
Link: https://msgid.link/20240505091420.6a808c4ae8f5.Ia79605838eb6deee9358bec633ef537f2653db92@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 72c19df2
@@ -14,6 +14,7 @@
 #define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
 #define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
 #define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
 #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
 #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
...
@@ -712,31 +712,7 @@ static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
         if (!action) {
                 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
         } else if (action == 1) {
-                struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
-                unsigned long usable_links = ieee80211_vif_usable_links(vif);
-                size_t n_channels = 0;
-                u8 link_id;
-
-                rcu_read_lock();
-
-                for_each_set_bit(link_id, &usable_links,
-                                 IEEE80211_MLD_MAX_NUM_LINKS) {
-                        struct ieee80211_bss_conf *link_conf =
-                                rcu_dereference(vif->link_conf[link_id]);
-
-                        if (WARN_ON_ONCE(!link_conf))
-                                continue;
-
-                        channels[n_channels++] = link_conf->chanreq.oper.chan;
-                }
-
-                rcu_read_unlock();
-
-                if (n_channels)
-                        ret = iwl_mvm_int_mlo_scan_start(mvm, vif, channels,
-                                                         n_channels);
-                else
-                        ret = -EINVAL;
+                ret = iwl_mvm_int_mlo_scan(mvm, vif);
         } else {
                 ret = -EINVAL;
         }
...
@@ -787,37 +787,42 @@ u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
 #define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
 #define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
 
-static void iwl_mvm_recalc_esr_prevention(struct iwl_mvm *mvm,
-                                          struct iwl_mvm_vif *mvmvif,
-                                          enum iwl_mvm_esr_state reason)
+static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
+                                         struct iwl_mvm_vif *mvmvif,
+                                         enum iwl_mvm_esr_state reason)
 {
-        unsigned long now = jiffies;
-        unsigned long delay;
-        bool timeout_expired =
-                time_after(now, mvmvif->last_esr_exit.ts +
-                           IWL_MVM_PREVENT_ESR_TIMEOUT);
-
-        if (WARN_ON(!(IWL_MVM_ESR_PREVENT_REASONS & reason)))
-                return;
+        bool timeout_expired = time_after(jiffies,
                                          mvmvif->last_esr_exit.ts +
+                                          IWL_MVM_PREVENT_ESR_TIMEOUT);
+        unsigned long delay;
 
         lockdep_assert_held(&mvm->mutex);
 
-        mvmvif->last_esr_exit.ts = now;
+        /* Only handle reasons that can cause prevention */
+        if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
+                return false;
 
-        if (timeout_expired ||
-            mvmvif->last_esr_exit.reason != reason) {
-                mvmvif->last_esr_exit.reason = reason;
+        /*
+         * Reset the counter if more than 400 seconds have passed between one
+         * exit and the other, or if we exited due to a different reason.
+         * Will also reset the counter after the long prevention is done.
+         */
+        if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
                 mvmvif->exit_same_reason_count = 1;
-                return;
+                return false;
         }
 
         mvmvif->exit_same_reason_count++;
         if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
                     mvmvif->exit_same_reason_count > 3))
-                return;
+                return false;
 
         mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
 
+        /*
+         * For the second exit, use a short prevention, and for the third one,
+         * use a long prevention.
+         */
         delay = mvmvif->exit_same_reason_count == 2 ?
                 IWL_MVM_ESR_PREVENT_SHORT :
                 IWL_MVM_ESR_PREVENT_LONG;
@@ -828,8 +833,11 @@ static void iwl_mvm_recalc_esr_prevention(struct iwl_mvm *mvm,
 
         wiphy_delayed_work_queue(mvm->hw->wiphy,
                                  &mvmvif->prevent_esr_done_wk, delay);
+        return true;
 }
 
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
+
 /* API to exit eSR mode */
 void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                       enum iwl_mvm_esr_state reason,
@@ -837,6 +845,7 @@ void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
         u16 new_active_links;
+        bool prevented;
 
         lockdep_assert_held(&mvm->mutex);
@@ -857,8 +866,25 @@ void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
         ieee80211_set_active_links_async(vif, new_active_links);
 
-        if (IWL_MVM_ESR_PREVENT_REASONS & reason)
-                iwl_mvm_recalc_esr_prevention(mvm, mvmvif, reason);
+        /* Prevent EMLSR if needed */
+        prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
+
+        /* Remember why and when we exited EMLSR */
+        mvmvif->last_esr_exit.ts = jiffies;
+        mvmvif->last_esr_exit.reason = reason;
+
+        /*
+         * If EMLSR is prevented now - don't try to get back to EMLSR.
+         * If we exited due to a blocking event, we will try to get back to
+         * EMLSR when the corresponding unblocking event will happen.
+         */
+        if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
+                return;
+
+        /* If EMLSR is not blocked - try enabling it again in 30 seconds */
+        wiphy_delayed_work_queue(mvm->hw->wiphy,
+                                 &mvmvif->mlo_int_scan_wk,
+                                 round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
 }
 
 void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -882,6 +908,43 @@ void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
 }
 
+static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif)
+{
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
+                                               IWL_MVM_TRIGGER_LINK_SEL_TIME);
+
+        lockdep_assert_held(&mvm->mutex);
+
+        if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
+            mvmvif->esr_active)
+                return;
+
+        IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
+
+        /*
+         * If EMLSR was blocked for more than 30 seconds, or the last link
+         * selection decided to not enter EMLSR, trigger a new scan.
+         */
+        if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
+                IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
+                wiphy_delayed_work_queue(mvm->hw->wiphy,
+                                         &mvmvif->mlo_int_scan_wk, 0);
+        /*
+         * If EMLSR was blocked for less than 30 seconds, and the last link
+         * selection decided to use EMLSR, activate EMLSR using the previous
+         * link selection result.
+         */
+        } else {
+                IWL_DEBUG_INFO(mvm,
+                               "Use the latest link selection result: 0x%x\n",
+                               mvmvif->link_selection_res);
+                ieee80211_set_active_links_async(vif,
+                                                 mvmvif->link_selection_res);
+        }
+}
+
 void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                          enum iwl_mvm_esr_state reason)
 {
@@ -898,4 +961,7 @@ void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        reason);
 
         mvmvif->esr_disable_reason &= ~reason;
+
+        if (!mvmvif->esr_disable_reason)
+                iwl_mvm_esr_unblocked(mvm, vif);
 }
@@ -1625,6 +1625,20 @@ static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
         mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+        struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
+                                                  mlo_int_scan_wk.work);
+        struct ieee80211_vif *vif =
+                container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+        mutex_lock(&mvmvif->mvm->mutex);
+
+        iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
+
+        mutex_unlock(&mvmvif->mvm->mutex);
+}
+
 void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
 {
         lockdep_assert_held(&mvm->mutex);
@@ -1637,6 +1651,9 @@ void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
 
         wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
                                 iwl_mvm_prevent_esr_done_wk);
+
+        wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
+                                iwl_mvm_mlo_int_scan_wk);
 }
 
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1783,6 +1800,9 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
         wiphy_delayed_work_cancel(mvm->hw->wiphy,
                                   &mvmvif->prevent_esr_done_wk);
 
+        wiphy_delayed_work_cancel(mvm->hw->wiphy,
+                                  &mvmvif->mlo_int_scan_wk);
+
         cancel_delayed_work_sync(&mvmvif->csa_work);
 }
@@ -3877,7 +3897,6 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
         return callbacks->update_sta(mvm, vif, sta);
 }
 
-
 static int
 iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
@@ -3901,7 +3920,7 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
         WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
 
         mvmvif->authorized = 1;
-        mvmvif->link_selection_res = 0;
+        mvmvif->link_selection_res = vif->active_links;
         mvmvif->link_selection_primary =
                 vif->active_links ? __ffs(vif->active_links) : 0;
@@ -3968,6 +3987,9 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
         wiphy_delayed_work_cancel(mvm->hw->wiphy,
                                   &mvmvif->prevent_esr_done_wk);
 
+        wiphy_delayed_work_cancel(mvm->hw->wiphy,
+                                  &mvmvif->mlo_int_scan_wk);
+
         /* No need for the periodic statistics anymore */
         if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active)
                 iwl_mvm_request_periodic_system_statistics(mvm, false);
...
@@ -429,6 +429,7 @@ struct iwl_mvm_esr_exit {
  *      @last_esr_exit::reason, only counting exits due to
  *      &IWL_MVM_ESR_PREVENT_REASONS.
  * @prevent_esr_done_wk: work that should be done when esr prevention ends.
+ * @mlo_int_scan_wk: work for the internal MLO scan.
  */
 struct iwl_mvm_vif {
         struct iwl_mvm *mvm;
@@ -525,6 +526,7 @@ struct iwl_mvm_vif {
         struct iwl_mvm_esr_exit last_esr_exit;
         u8 exit_same_reason_count;
         struct wiphy_delayed_work prevent_esr_done_wk;
+        struct wiphy_delayed_work mlo_int_scan_wk;
 
         struct iwl_mvm_vif_link_info deflink;
         struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -2089,13 +2091,11 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                            struct ieee80211_scan_ies *ies);
 size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
-int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                               struct ieee80211_channel **channels,
-                               size_t n_channels);
 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 
 /* Scheduled scan */
 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
...
@@ -3202,6 +3202,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
         struct iwl_umac_scan_complete *notif = (void *)pkt->data;
         u32 uid = __le32_to_cpu(notif->uid);
         bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+        bool select_links = false;
 
         mvm->mei_scan_filter.is_mei_limited_scan = false;
@@ -3235,6 +3236,11 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
                 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
         } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
                 IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
+                /*
+                 * Other scan types won't necessarily scan for the MLD links channels.
+                 * Therefore, only select links after successful internal scan.
+                 */
+                select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
         }
 
         mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -3255,7 +3261,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 
         mvm->scan_uid_status[uid] = 0;
 
-        if (notif->status == IWL_SCAN_OFFLOAD_COMPLETED)
+        if (select_links)
                 iwl_mvm_post_scan_link_selection(mvm);
 }
@@ -3517,7 +3523,8 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
         return ret;
 }
 
-int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
                                struct ieee80211_channel **channels,
                                size_t n_channels)
 {
@@ -3563,3 +3570,37 @@ int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
         return ret;
 }
+
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+        struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+        unsigned long usable_links = ieee80211_vif_usable_links(vif);
+        size_t n_channels = 0;
+        u8 link_id;
+
+        lockdep_assert_held(&mvm->mutex);
+
+        if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
+                IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
+                return -EBUSY;
+        }
+
+        rcu_read_lock();
+
+        for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+                struct ieee80211_bss_conf *link_conf =
+                        rcu_dereference(vif->link_conf[link_id]);
+
+                if (WARN_ON_ONCE(!link_conf))
+                        continue;
+
+                channels[n_channels++] = link_conf->chanreq.oper.chan;
+        }
+
+        rcu_read_unlock();
+
+        if (!n_channels)
+                return -EINVAL;
+
+        return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
+}