Commit 2fe30dce authored by Felix Fietkau

mt76: reduce locking in mt76_dma_tx_cleanup

q->tail can be safely updated without locking, because there is no
concurrent access. If called from outside of the tasklet (for flushing),
the tasklet is always disabled.
q->queued can be safely read without locking, as long as the decrement
happens within the locked section.
This patch allows cleaning up tx packets outside of the section that holds
the queue lock, for improved performance.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 90fdc171
......@@ -149,31 +149,29 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
struct mt76_sw_queue *sq = &dev->q_tx[qid];
struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
unsigned int n_swq_queued[4] = {};
unsigned int n_queued = 0;
bool wake = false;
int last;
int i, last;
if (!q)
return;
spin_lock_bh(&q->lock);
if (flush)
last = -1;
else
last = readl(&q->regs->dma_idx);
while (q->queued && q->tail != last) {
while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
dev->q_tx[entry.qid].swq_queued--;
n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
n_queued++;
if (entry.skb) {
spin_unlock_bh(&q->lock);
if (entry.skb)
dev->drv->tx_complete_skb(dev, qid, &entry);
spin_lock_bh(&q->lock);
}
if (entry.txwi) {
mt76_put_txwi(dev, entry.txwi);
......@@ -184,6 +182,16 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
last = readl(&q->regs->dma_idx);
}
spin_lock_bh(&q->lock);
q->queued -= n_queued;
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
if (!n_swq_queued[i])
continue;
dev->q_tx[i].swq_queued -= n_swq_queued[i];
}
if (flush)
mt76_dma_sync_idx(dev, q);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.