Commit 9716ef04 authored by Felix Fietkau

mt76: attempt to free up more room when filling the tx queue

Run dma cleanup immediately if the queue is almost full, instead of waiting
for the tx interrupt
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 5342758d
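The idea, for illustration: instead of waiting for the tx-done interrupt to reclaim descriptors, the fill path reclaims them itself once the ring is nearly full. A minimal standalone sketch of that pattern follows; every name in it (struct ring, ring_cleanup, FREE_THR, NDESC) is hypothetical and only stands in for the mt76 equivalents.

#include <stdbool.h>
#include <stdio.h>

#define NDESC    128	/* hypothetical ring size */
#define FREE_THR  16	/* hypothetical free-slot threshold */

struct ring {
	int queued;	/* descriptors handed to the hardware */
	int completed;	/* descriptors the hardware has finished */
};

/* Reclaim every descriptor the hardware is already done with. */
static void ring_cleanup(struct ring *r)
{
	r->queued -= r->completed;
	r->completed = 0;
}

static bool ring_enqueue(struct ring *r)
{
	/* The ring is almost full: run cleanup now instead of
	 * waiting for the tx interrupt, mirroring the check the
	 * patch adds to mt76_txq_schedule_list(). */
	if (r->queued + 2 * FREE_THR >= NDESC)
		ring_cleanup(r);

	if (r->queued == NDESC)
		return false;	/* genuinely full, back off */

	r->queued++;
	return true;
}

int main(void)
{
	struct ring r = { .queued = NDESC - FREE_THR, .completed = 8 };

	if (ring_enqueue(&r))
		printf("queued after early cleanup: %d/%d\n", r.queued, NDESC);
	return 0;
}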
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -88,6 +88,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	int i;
 
 	spin_lock_init(&q->lock);
+	spin_lock_init(&q->cleanup_lock);
 	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
 	q->ndesc = n_desc;
@@ -225,6 +226,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 	if (!q)
 		return;
 
+	spin_lock_bh(&q->cleanup_lock);
 	if (flush)
 		last = -1;
 	else
@@ -243,6 +245,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 			last = readl(&q->regs->dma_idx);
 	}
 
+	spin_unlock_bh(&q->cleanup_lock);
 	if (flush) {
 		spin_lock_bh(&q->lock);
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -126,6 +126,7 @@ struct mt76_queue {
 	struct mt76_queue_regs __iomem *regs;
 
 	spinlock_t lock;
+	spinlock_t cleanup_lock;
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -448,6 +448,7 @@ static int
 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
 	struct mt76_queue *q = phy->q_tx[qid];
+	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq;
 	struct mt76_txq *mtxq;
 	struct mt76_wcid *wcid;
@@ -461,6 +462,13 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 			break;
 		}
 
+		if (dev->queue_ops->tx_cleanup &&
+		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+			spin_unlock_bh(&q->lock);
+			dev->queue_ops->tx_cleanup(dev, q, false);
+			spin_lock_bh(&q->lock);
+		}
+
 		if (mt76_txq_stopped(q))
 			break;
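Two details of the hunks above are worth noting. The threshold test q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc fires while the ring still has up to 2 * MT_TXQ_FREE_THR free slots; with, say, a hypothetical 256-descriptor ring and MT_TXQ_FREE_THR of 64, inline cleanup would start once 128 entries are queued, well before the ring actually fills. And because tx_cleanup may now be called both from here and from the tx interrupt path, q->lock is dropped around the call and the new cleanup_lock keeps the two cleanup invocations from racing.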