Commit ba459094 authored by Shayne Chen, committed by Felix Fietkau

mt76: testmode: make tx queued limit adjustable

Originally, the tx queued limit was set to 1000 to avoid running out of
tx tokens. If a new testmode tx is triggered while the previous one has
not finished yet, we wait until tx_done equals tx_queued, up to a fixed
timeout. Normally, a batch of the current queued limit can finish within
10 seconds.

However, if ipg is configured to a larger value, fewer than 1000 packets
can complete within the default timeout period, which may lead to a
crash when a new testmode tx is triggered.

To deal with this, make the tx queued limit dynamically adjustable
according to the ipg value.
Signed-off-by: Shayne Chen <shayne.chen@mediatek.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent b8cbdb97
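
For illustration only: a minimal sketch of how a driver's testmode setup could size tx_queued_limit from the configured inter-packet gap so that one batch still drains within the MT76_TM_TIMEOUT wait in mt76_testmode_tx_stop(). The helper name and the tm_ipg_us parameter are assumptions, and the sketch relies on the usual kernel helpers (min_t(), USEC_PER_SEC); the actual per-driver calculation is not part of this diff.

/* Hypothetical helper (not from this patch): cap the number of queued
 * testmode frames so that, with an inter-packet gap of tm_ipg_us
 * microseconds, the whole batch can complete within MT76_TM_TIMEOUT
 * seconds.  Falls back to the historical limit of 1000 frames.
 */
static void tm_update_queued_limit(struct mt76_testmode_data *td, u32 tm_ipg_us)
{
	u32 limit = 1000;

	if (tm_ipg_us)
		limit = min_t(u32, limit,
			      MT76_TM_TIMEOUT * USEC_PER_SEC / tm_ipg_us);

	td->tx_queued_limit = limit;
}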
mt76.h
@@ -552,6 +552,7 @@ struct mt76_testmode_data {
 	u32 tx_pending;
 	u32 tx_queued;
+	u16 tx_queued_limit;
 	u32 tx_done;
 	struct {
 		u64 packets[__MT_RXQ_MAX];
testmode.c
@@ -30,6 +30,7 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
 	struct mt76_wcid *wcid = &dev->global_wcid;
 	struct sk_buff *skb = td->tx_skb;
 	struct mt76_queue *q;
+	u16 tx_queued_limit;
 	int qid;
 
 	if (!skb || !td->tx_pending)
@@ -38,9 +39,12 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
 	qid = skb_get_queue_mapping(skb);
 	q = phy->q_tx[qid];
 
+	tx_queued_limit = td->tx_queued_limit ? td->tx_queued_limit : 1000;
+
 	spin_lock_bh(&q->lock);
 
-	while (td->tx_pending > 0 && td->tx_queued - td->tx_done < 1000 &&
+	while (td->tx_pending > 0 &&
+	       td->tx_queued - td->tx_done < tx_queued_limit &&
 	       q->queued < q->ndesc / 2) {
 		int ret;
@@ -196,7 +200,8 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
 	mt76_worker_enable(&dev->tx_worker);
 
-	wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
+	wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
+			   MT76_TM_TIMEOUT * HZ);
 
 	dev_kfree_skb(td->tx_skb);
 	td->tx_skb = NULL;
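
A quick sanity check on the numbers (illustrative, not from the patch): with MT76_TM_TIMEOUT at 10 seconds, a batch of 1000 queued frames only drains in time if the average per-frame gap stays below roughly 10 ms. With an ipg of, say, 20 ms, 1000 frames would need about 20 seconds, so wait_event_timeout() expires while tx_done is still short of tx_queued, which is exactly the situation the adjustable limit avoids.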
testmode.h
@@ -5,6 +5,8 @@
 #ifndef __MT76_TESTMODE_H
 #define __MT76_TESTMODE_H
 
+#define MT76_TM_TIMEOUT		10
+
 /**
  * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
  *