Commit f699254c authored by Marc Yang, committed by John W. Linville

mwifiex: reduce CPU usage by tracking tx_pkts_queued

This patch adds tx_pkts_queued to track the number of packets being
enqueued and dequeued, so that the mwifiex_wmm_lists_empty()
evaluation is lightweight.
Signed-off-by: Marc Yang <yangyang@marvell.com>
Signed-off-by: Bing Zhao <bzhao@marvell.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 7176ba23
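For background (not part of the commit): the change replaces a per-TID
RA-list scan with one atomic packet counter per interface. Below is a
minimal user-space sketch of the same bookkeeping pattern, using C11
atomics in place of the kernel's atomic_t; all names in it (struct txq,
enqueue_pkt, dequeue_pkt, lists_empty) are hypothetical, for
illustration only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-interface WMM state. */
struct txq {
	atomic_int pkts_queued;	/* mirrors priv->wmm.tx_pkts_queued */
};

/* Bump the counter wherever a packet enters the queues. */
static void enqueue_pkt(struct txq *q)
{
	atomic_fetch_add(&q->pkts_queued, 1);
}

/* Drop the counter wherever a packet leaves the queues. */
static void dequeue_pkt(struct txq *q)
{
	atomic_fetch_sub(&q->pkts_queued, 1);
}

/* O(1) emptiness test: one atomic load instead of walking
 * every RA list of every TID. */
static bool lists_empty(struct txq *q)
{
	return atomic_load(&q->pkts_queued) == 0;
}

int main(void)
{
	struct txq q = { 0 };

	enqueue_pkt(&q);
	printf("empty after enqueue: %d\n", lists_empty(&q));	/* 0 */
	dequeue_pkt(&q);
	printf("empty after dequeue: %d\n", lists_empty(&q));	/* 1 */
	return 0;
}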
@@ -196,6 +196,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 		if (skb_src)
 			pra_list->total_pkts_size -= skb_src->len;
 
+		atomic_dec(&priv->wmm.tx_pkts_queued);
+
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
 		mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
@@ -257,6 +259,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
 		pra_list->total_pkts_size += skb_aggr->len;
 
+		atomic_inc(&priv->wmm.tx_pkts_queued);
+
 		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
@@ -213,7 +213,8 @@ struct mwifiex_wmm_desc {
 	u32 drv_pkt_delay_max;
 	u8 queue_priority[IEEE80211_MAX_QUEUES];
 	u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1];	/* UP: 0 to 7 */
-
+	/* Number of transmit packets queued */
+	atomic_t tx_pkts_queued;
 };
 
 struct mwifiex_802_11_security {
@@ -399,6 +399,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
 		priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
 		priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
 		priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
+
+		atomic_set(&priv->wmm.tx_pkts_queued, 0);
 	}
 }
@@ -408,17 +410,13 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
 int
 mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
 {
-	int i, j;
+	int i;
 	struct mwifiex_private *priv;
 
-	for (j = 0; j < adapter->priv_num; ++j) {
-		priv = adapter->priv[j];
-		if (priv) {
-			for (i = 0; i < MAX_NUM_TID; i++)
-				if (!mwifiex_wmm_is_ra_list_empty(
-					&priv->wmm.tid_tbl_ptr[i].ra_list))
-					return false;
-		}
+	for (i = 0; i < adapter->priv_num; ++i) {
+		priv = adapter->priv[i];
+		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
+			return false;
 	}
 
 	return true;
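With the counter in place, mwifiex_wmm_lists_empty() reads one atomic_t
per interface instead of calling mwifiex_wmm_is_ra_list_empty() on up to
MAX_NUM_TID RA lists per interface: the check drops from
O(priv_num * MAX_NUM_TID) list walks to O(priv_num) atomic reads, which
is the CPU saving the commit title refers to.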
@@ -468,6 +466,8 @@ static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
 	for (i = 0; i < MAX_NUM_TID; i++)
 		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
 					       ra_list);
+
+	atomic_set(&priv->wmm.tx_pkts_queued, 0);
 }
 
 /*
@@ -638,6 +638,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
 
 	ra_list->total_pkts_size += skb->len;
 
+	atomic_inc(&priv->wmm.tx_pkts_queued);
+
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
@@ -1028,6 +1030,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
 					       .bss_prio_cur->list,
 					       struct mwifiex_bss_prio_node,
 					       list);
+		atomic_dec(&priv->wmm.tx_pkts_queued);
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
 	}
@@ -1134,6 +1137,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
 					       .bss_prio_cur->list,
 					       struct mwifiex_bss_prio_node,
 					       list);
+		atomic_dec(&priv->wmm.tx_pkts_queued);
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
 	}
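The two decrements above cover both transmit paths (freshly dequeued
packets and requeued, already-processed ones), while
mwifiex_wmm_cleanup_queues() resets the counter to zero when the lists
are flushed. Each enqueue/dequeue update is made before the matching
spin_unlock_irqrestore(), i.e. while ra_list_spinlock is still held, so
the counter stays consistent with the RA-list operations it mirrors.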