Commit 3fbd45ca authored by Johannes Berg

mac80211: fix remain-on-channel cancel crash

If a ROC item is canceled just as it expires, its work
struct may be scheduled again while it is already running
(and waiting for the mutex). The work then runs after the
item has been freed, which obviously crashes.

To fix this, don't free the item when an abort is
requested; instead mark it as "to be freed", which makes
the work a no-op and allows it to be freed outside.

Cc: stable@vger.kernel.org [3.6+]
Reported-by: Jouni Malinen <j@w1.fi>
Tested-by: Jouni Malinen <j@w1.fi>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 370bd005
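
The race and the fix are easier to see outside the kernel. Below is a
minimal userspace C sketch of the same pattern, with pthreads standing
in for the mac80211 work queue; all names here (roc_item, roc_work,
roc_cancel) are illustrative, not from mac80211. The cancel path marks
the item "to be freed" under the lock instead of freeing it, a
late-running worker sees the flag and becomes a no-op, and the memory
is freed only after the worker has been flushed.

/*
 * Minimal userspace analogue of the "to_be_freed" fix; hypothetical
 * names, not mac80211 code. Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct roc_item {
        pthread_mutex_t *lock;  /* plays the role of local->mtx */
        bool to_be_freed;       /* set instead of freeing early */
};

/* Worker: may already be scheduled when the item gets canceled. */
static void *roc_work(void *arg)
{
        struct roc_item *roc = arg;

        pthread_mutex_lock(roc->lock);
        if (roc->to_be_freed) {         /* canceled while we waited */
                pthread_mutex_unlock(roc->lock);
                return NULL;            /* no-op: memory is still valid */
        }
        /* ... normal ROC processing would happen here ... */
        pthread_mutex_unlock(roc->lock);
        return NULL;
}

/* Cancel path: mark under the lock, free only after the worker is done. */
static void roc_cancel(struct roc_item *roc, pthread_t worker)
{
        pthread_mutex_lock(roc->lock);
        roc->to_be_freed = true;        /* instead of freeing here */
        pthread_mutex_unlock(roc->lock);

        pthread_join(worker, NULL);     /* like flush_delayed_work() */
        free(roc);                      /* now provably safe */
}

int main(void)
{
        static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
        struct roc_item *roc = calloc(1, sizeof(*roc));
        pthread_t worker;

        if (!roc)
                return 1;
        roc->lock = &mtx;
        pthread_create(&worker, NULL, roc_work, roc);
        roc_cancel(roc, worker);
        puts("canceled without use-after-free");
        return 0;
}

The ordering mirrors the patch: the flag is written under the same lock
the worker takes first, and the free happens only after the flush
(pthread_join() here, flush_delayed_work() in the patch) guarantees the
worker can no longer touch the item.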
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
 			list_del(&dep->list);
 			mutex_unlock(&local->mtx);
 
-			ieee80211_roc_notify_destroy(dep);
+			ieee80211_roc_notify_destroy(dep, true);
 
 			return 0;
 		}
@@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
 		ieee80211_start_next_roc(local);
 		mutex_unlock(&local->mtx);
 
-		ieee80211_roc_notify_destroy(found);
+		ieee80211_roc_notify_destroy(found, true);
 	} else {
 		/* work may be pending so use it all the time */
 		found->abort = true;
@@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
 		/* work will clean up etc */
 		flush_delayed_work(&found->work);
+		WARN_ON(!found->to_be_freed);
+		kfree(found);
 	}
 
 	return 0;
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -309,6 +309,7 @@ struct ieee80211_roc_work {
 	struct ieee80211_channel *chan;
 
 	bool started, abort, hw_begun, notified;
+	bool to_be_freed;
 
 	unsigned long hw_start_time;
@@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);
 void ieee80211_roc_setup(struct ieee80211_local *local);
 void ieee80211_start_next_roc(struct ieee80211_local *local);
 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
 void ieee80211_sw_roc_work(struct work_struct *work);
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
 	}
 }
 
-void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)
 {
 	struct ieee80211_roc_work *dep, *tmp;
 
+	if (WARN_ON(roc->to_be_freed))
+		return;
+
 	/* was never transmitted */
 	if (roc->frame) {
 		cfg80211_mgmt_tx_status(&roc->sdata->wdev,
@@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
 				GFP_KERNEL);
 
 	list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
-		ieee80211_roc_notify_destroy(dep);
+		ieee80211_roc_notify_destroy(dep, true);
 
-	kfree(roc);
+	if (free)
+		kfree(roc);
+	else
+		roc->to_be_freed = true;
 }
 
 void ieee80211_sw_roc_work(struct work_struct *work)
@@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 	mutex_lock(&local->mtx);
 
+	if (roc->to_be_freed)
+		goto out_unlock;
+
 	if (roc->abort)
 		goto finish;
@@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 	finish:
 		list_del(&roc->list);
 		started = roc->started;
-		ieee80211_roc_notify_destroy(roc);
+		ieee80211_roc_notify_destroy(roc, !roc->abort);
 
 		if (started) {
 			drv_flush(local, false);
@@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
 	list_del(&roc->list);
 
-	ieee80211_roc_notify_destroy(roc);
+	ieee80211_roc_notify_destroy(roc, true);
 
 	/* if there's another roc, start it now */
 	ieee80211_start_next_roc(local);
@@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
 	list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
 		if (local->ops->remain_on_channel) {
 			list_del(&roc->list);
-			ieee80211_roc_notify_destroy(roc);
+			ieee80211_roc_notify_destroy(roc, true);
 		} else {
 			ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
 
 			/* work will clean up etc */
 			flush_delayed_work(&roc->work);
+			WARN_ON(!roc->to_be_freed);
+			kfree(roc);
 		}
 	}