Commit e937b8da authored by Toke Høiland-Jørgensen, committed by Johannes Berg

mac80211: Add TXQ scheduling API

This adds an API to mac80211 to handle scheduling of TXQs and changes the
interface between driver and mac80211 for TXQ handling as follows:

- The wake_tx_queue callback interface no longer includes the TXQ. Instead,
  the driver is expected to retrieve it from ieee80211_next_txq().

- Two new mac80211 functions are added: ieee80211_next_txq() and
  ieee80211_schedule_txq(). The former returns the next TXQ that should be
  scheduled, and is how the driver gets a queue to pull packets from. The
  latter is called internally by mac80211 to start scheduling a queue, and
  the driver is supposed to call it to re-schedule the TXQ after it is
  finished pulling packets from it (unless the queue emptied).

The ath9k and ath10k drivers are changed to use the new API.
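For illustration (not part of the patch), a minimal sketch of how such a driver's
.wake_tx_queue handler can use the new calls; my_driver_tx_frame() and the budget
of 16 frames are hypothetical placeholders:

#include <net/mac80211.h>

/* Hypothetical helper: hand one frame to the hardware queue backing @txq. */
static void my_driver_tx_frame(struct ieee80211_hw *hw,
			       struct ieee80211_txq *txq, struct sk_buff *skb);

/* Sketch of a .wake_tx_queue handler built on the new pull-based flow. */
static void my_driver_wake_tx_queue(struct ieee80211_hw *hw)
{
	struct ieee80211_txq *txq;
	struct sk_buff *skb = NULL;
	int budget = 16;	/* arbitrary per-call frame limit */

	/* Ask mac80211 which TXQ should be served next. */
	txq = ieee80211_next_txq(hw);
	if (!txq)
		return;

	/* Pull frames from that TXQ and push them to the hardware. */
	while (budget--) {
		skb = ieee80211_tx_dequeue(hw, txq);
		if (!skb)
			break;
		my_driver_tx_frame(hw, txq, skb);
	}

	/* Re-schedule the TXQ unless it ran dry. */
	if (skb)
		ieee80211_schedule_txq(hw, txq);
}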
Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent 9de18d81
......@@ -2574,9 +2574,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
spin_lock_init(&ar->txqs_lock);
INIT_LIST_HEAD(&ar->txqs);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
......
......@@ -347,7 +347,6 @@ struct ath10k_peer {
};
struct ath10k_txq {
struct list_head list;
unsigned long num_fw_queued;
unsigned long num_push_allowed;
};
......@@ -895,10 +894,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
/* protects: ar->txqs, artxq->list */
spinlock_t txqs_lock;
struct list_head txqs;
struct list_head arvifs;
struct list_head peers;
struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
......
......@@ -3830,12 +3830,10 @@ static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
return;
artxq = (void *)txq->drv_priv;
INIT_LIST_HEAD(&artxq->list);
}
static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
{
struct ath10k_txq *artxq;
struct ath10k_skb_cb *cb;
struct sk_buff *msdu;
int msdu_id;
......@@ -3843,12 +3841,6 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
if (!txq)
return;
artxq = (void *)txq->drv_priv;
spin_lock_bh(&ar->txqs_lock);
if (!list_empty(&artxq->list))
list_del_init(&artxq->list);
spin_unlock_bh(&ar->txqs_lock);
spin_lock_bh(&ar->htt.tx_lock);
idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
cb = ATH10K_SKB_CB(msdu);
......@@ -3978,23 +3970,17 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
void ath10k_mac_tx_push_pending(struct ath10k *ar)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_txq *txq;
struct ath10k_txq *artxq;
struct ath10k_txq *last;
struct ieee80211_txq *txq, *first = NULL;
int ret;
int max;
if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
return;
spin_lock_bh(&ar->txqs_lock);
rcu_read_lock();
last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
while (!list_empty(&ar->txqs)) {
artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
txq = container_of((void *)artxq, struct ieee80211_txq,
drv_priv);
txq = ieee80211_next_txq(hw);
while (txq) {
/* Prevent aggressive sta/tid taking over tx queue */
max = 16;
......@@ -4005,18 +3991,21 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
break;
}
list_del_init(&artxq->list);
if (ret != -ENOENT)
list_add_tail(&artxq->list, &ar->txqs);
ieee80211_schedule_txq(hw, txq);
ath10k_htt_tx_txq_update(hw, txq);
if (artxq == last || (ret < 0 && ret != -ENOENT))
if (first == txq || (ret < 0 && ret != -ENOENT))
break;
if (!first)
first = txq;
txq = ieee80211_next_txq(hw);
}
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
/************/
......@@ -4250,34 +4239,22 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
}
}
static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
struct ath10k_txq *artxq = (void *)txq->drv_priv;
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
struct ieee80211_txq *txq;
int ret = 0;
int max = 16;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
list_add_tail(&artxq->list, &ar->txqs);
f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
list_del_init(&f_artxq->list);
txq = ieee80211_next_txq(hw);
while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, f_txq);
while (ath10k_mac_tx_can_push(hw, txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, txq);
if (ret)
break;
}
if (ret != -ENOENT)
list_add_tail(&f_artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);
ieee80211_schedule_txq(hw, txq);
ath10k_htt_tx_txq_update(hw, f_txq);
ath10k_htt_tx_txq_update(hw, txq);
}
......
......@@ -246,12 +246,8 @@ struct ath_atx_tid {
s8 bar_index;
bool active;
bool clear_ps_filter;
bool has_queued;
};
void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
struct ath_node {
struct ath_softc *sc;
struct ieee80211_sta *sta; /* station struct we're part of */
......@@ -591,8 +587,7 @@ bool ath_drain_all_txq(struct ath_softc *sc);
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
void ath_txq_schedule_all(struct ath_softc *sc);
void ath_txq_schedule(struct ath_softc *sc);
int ath_tx_init(struct ath_softc *sc, int nbufs);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
......@@ -618,7 +613,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);
void ath9k_wake_tx_queue(struct ieee80211_hw *hw);
/********/
/* VIFs */
......
......@@ -266,7 +266,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
}
work:
ath_restart_work(sc);
ath_txq_schedule_all(sc);
ath_txq_schedule(sc);
}
sc->gtt_cnt = 0;
......
......@@ -1057,8 +1057,6 @@ static void ath_rx_count_airtime(struct ath_softc *sc,
if (!!(sc->airtime_flags & AIRTIME_USE_RX)) {
spin_lock_bh(&acq->lock);
an->airtime_deficit[acno] -= airtime;
if (an->airtime_deficit[acno] <= 0)
__ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno));
spin_unlock_bh(&acq->lock);
}
ath_debug_airtime(sc, an, airtime, 0);
......
[one file's diff is collapsed in this view and not shown]
......@@ -105,9 +105,12 @@
* The driver is expected to initialize its private per-queue data for stations
* and interfaces in the .add_interface and .sta_add ops.
*
* The driver can't access the queue directly. To dequeue a frame, it calls
* ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a queue, it
* calls the .wake_tx_queue driver op.
* The driver can't access the queue directly. To obtain the next queue to pull
* frames from, the driver calls ieee80211_next_txq(). To dequeue a frame from a
* txq, it calls ieee80211_tx_dequeue(). Whenever mac80211 adds a new frame to a
* queue, it calls the .wake_tx_queue driver op. The driver is expected to
* re-schedule the txq using ieee80211_schedule_txq() if it is still active
* after the driver has finished pulling packets from it.
*
* For AP powersave TIM handling, the driver only needs to indicate if it has
* buffered packets in the driver specific data structures by calling
......@@ -3731,8 +3734,7 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_tdls_ch_sw_params *params);
void (*wake_tx_queue)(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
void (*wake_tx_queue)(struct ieee80211_hw *hw);
void (*sync_rx_queues)(struct ieee80211_hw *hw);
int (*start_nan)(struct ieee80211_hw *hw,
......@@ -5883,13 +5885,36 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* ieee80211_tx_dequeue - dequeue a packet from a software tx queue
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
* @txq: pointer obtained from ieee80211_next_txq()
*
* Returns the skb if successful, %NULL if no frame was available.
*/
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
/**
* ieee80211_schedule_txq - add txq to scheduling loop
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
*
* Returns %true if the txq was actually added to the scheduling,
* %false otherwise.
*/
bool ieee80211_schedule_txq(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
/**
* ieee80211_next_txq - get next tx queue to pull packets from
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
*
* Returns the next txq if successful, %NULL if no queue is eligible. If a txq
* is returned, it will have been removed from the scheduler queue and needs to
* be re-scheduled with ieee80211_schedule_txq() to continue to be active.
*/
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw);
/**
* ieee80211_txq_get_depth - get pending frame/byte count of given txq
*
......
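To tie the two new exports together: a hedged sketch of a driver-side "push
pending" loop in the style of the ath10k change above, serving each active TXQ
once per round. my_driver_push_txq() and its return convention (negative errno,
-ENOENT when a TXQ is empty) are assumptions for the sketch, not part of this
patch:

#include <linux/errno.h>
#include <net/mac80211.h>

/* Hypothetical helper: push up to @max frames from @txq to the hardware.
 * Returns 0 on success, -ENOENT once the TXQ is empty, another negative
 * errno on a hardware/firmware error. */
static int my_driver_push_txq(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq, int max);

static void my_driver_push_pending(struct ieee80211_hw *hw)
{
	struct ieee80211_txq *txq, *first = NULL;
	int ret;

	/* Driver-side locking (e.g. RCU around the pull) omitted for brevity. */
	txq = ieee80211_next_txq(hw);
	while (txq) {
		/* Small per-TXQ batch so one station/TID cannot hog the ring. */
		ret = my_driver_push_txq(hw, txq, 16);

		/* Hand the TXQ back to mac80211's scheduler. */
		ieee80211_schedule_txq(hw, txq);

		/* Stop after one full round over the active TXQs, or on a
		 * real error (-ENOENT just means this TXQ ran dry). */
		if (first == txq || (ret < 0 && ret != -ENOENT))
			break;
		if (!first)
			first = txq;

		txq = ieee80211_next_txq(hw);
	}
}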
......@@ -226,9 +226,13 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
if (!ieee80211_schedule_txq(&sta->sdata->local->hw, txq))
return;
local_bh_disable();
rcu_read_lock();
drv_wake_tx_queue(sta->sdata->local, txqi);
drv_wake_tx_queue(sta->sdata->local);
rcu_read_unlock();
local_bh_enable();
}
......
......@@ -1158,16 +1158,10 @@ drv_tdls_recv_channel_switch(struct ieee80211_local *local,
trace_drv_return_void(local);
}
static inline void drv_wake_tx_queue(struct ieee80211_local *local,
struct txq_info *txq)
static inline void drv_wake_tx_queue(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
if (!check_sdata_in_driver(sdata))
return;
trace_drv_wake_tx_queue(local, sdata, txq);
local->ops->wake_tx_queue(&local->hw, &txq->txq);
trace_drv_wake_tx_queue(local);
local->ops->wake_tx_queue(&local->hw);
}
static inline int drv_start_nan(struct ieee80211_local *local,
......
......@@ -832,6 +832,7 @@ struct txq_info {
struct codel_vars def_cvars;
struct codel_stats cstats;
struct sk_buff_head frags;
struct list_head schedule_order;
unsigned long flags;
/* keep last! */
......@@ -1122,6 +1123,10 @@ struct ieee80211_local {
struct codel_vars *cvars;
struct codel_params cparams;
/* protects active_txqs and txqi->schedule_order */
spinlock_t active_txq_lock;
struct list_head active_txqs;
const struct ieee80211_ops *ops;
/*
......
......@@ -619,6 +619,9 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
spin_lock_init(&local->rx_path_lock);
spin_lock_init(&local->queue_stop_reason_lock);
INIT_LIST_HEAD(&local->active_txqs);
spin_lock_init(&local->active_txq_lock);
INIT_LIST_HEAD(&local->chanctx_list);
mutex_init(&local->chanctx_mtx);
......
......@@ -1237,12 +1237,17 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
if (sta->sta.txq[0]) {
bool wake = false;
for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
if (!txq_has_queue(sta->sta.txq[i]))
continue;
drv_wake_tx_queue(local, to_txq_info(sta->sta.txq[i]));
if (ieee80211_schedule_txq(&local->hw, sta->sta.txq[i]))
wake = true;
}
if (wake)
drv_wake_tx_queue(local);
}
skb_queue_head_init(&pending);
......
......@@ -2550,35 +2550,9 @@ TRACE_EVENT(drv_tdls_recv_channel_switch,
)
);
TRACE_EVENT(drv_wake_tx_queue,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct txq_info *txq),
TP_ARGS(local, sdata, txq),
TP_STRUCT__entry(
LOCAL_ENTRY
VIF_ENTRY
STA_ENTRY
__field(u8, ac)
__field(u8, tid)
),
TP_fast_assign(
struct ieee80211_sta *sta = txq->txq.sta;
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
__entry->ac = txq->txq.ac;
__entry->tid = txq->txq.tid;
),
TP_printk(
LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ac:%d tid:%d",
LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ac, __entry->tid
)
DEFINE_EVENT(local_only_evt, drv_wake_tx_queue,
TP_PROTO(struct ieee80211_local *local),
TP_ARGS(local)
);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
......
......@@ -1439,6 +1439,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
codel_vars_init(&txqi->def_cvars);
codel_stats_init(&txqi->cstats);
__skb_queue_head_init(&txqi->frags);
INIT_LIST_HEAD(&txqi->schedule_order);
txqi->txq.vif = &sdata->vif;
......@@ -1462,6 +1463,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
fq_tin_reset(fq, tin, fq_skb_free_func);
ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
list_del_init(&txqi->schedule_order);
}
int ieee80211_txq_setup_flows(struct ieee80211_local *local)
......@@ -1558,7 +1560,8 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock);
drv_wake_tx_queue(local, txqi);
if (ieee80211_schedule_txq(&local->hw, &txqi->txq))
drv_wake_tx_queue(local);
return true;
}
......@@ -3553,6 +3556,50 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_tx_dequeue);
bool ieee80211_schedule_txq(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
bool ret = false;
spin_lock_bh(&local->active_txq_lock);
if (list_empty(&txqi->schedule_order)) {
list_add_tail(&txqi->schedule_order, &local->active_txqs);
ret = true;
}
spin_unlock_bh(&local->active_txq_lock);
return ret;
}
EXPORT_SYMBOL(ieee80211_schedule_txq);
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = NULL;
spin_lock_bh(&local->active_txq_lock);
if (list_empty(&local->active_txqs))
goto out;
txqi = list_first_entry(&local->active_txqs,
struct txq_info, schedule_order);
list_del_init(&txqi->schedule_order);
out:
spin_unlock_bh(&local->active_txq_lock);
if (!txqi)
return NULL;
return &txqi->txq;
}
EXPORT_SYMBOL(ieee80211_next_txq);
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags)
......