Commit c12128ce authored by Felix Fietkau

mt76: use a per rx queue page fragment cache

Using the NAPI or netdev frag cache along with other drivers can lead to
32 KiB pages being held for a long time, despite only being used for
very few page fragments.

This can happen if the driver grabs one or two fragments for rx ring
refill, while other drivers use (and free up) the remaining fragments.
The 32 KiB higher-order page can only be freed once all users have freed
their fragments.

Depending on the traffic patterns, this can waste a lot of memory and
look a lot like a memory leak.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 8842d485
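For context, page_frag_alloc() hands out fragments carved from a cached higher-order (32 KiB) page, and that page can only be freed once every fragment taken from it has been released. Embedding a page_frag_cache in the rx queue, as this commit does, means the queue only ever shares pages with its own fragments, never with other drivers. The sketch below illustrates that pattern; the demo_rxq names are hypothetical, and only struct page_frag_cache, page_frag_alloc() and __page_frag_cache_drain() are the real kernel APIs the commit relies on.

/*
 * Minimal sketch of a per-queue page fragment cache. The demo_* names
 * are illustrative only; the page_frag_cache APIs are real.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

struct demo_rxq {
	/* private cache: the underlying 32 KiB page is shared only
	 * between this queue's own rx fragments */
	struct page_frag_cache rx_page;
	int buf_size;
};

static void *demo_rxq_refill_one(struct demo_rxq *q)
{
	/* carve one rx buffer out of the queue's cached page */
	return page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
}

static void demo_rxq_drain(struct demo_rxq *q)
{
	struct page *page;

	if (!q->rx_page.va)
		return;

	/* release the references the cache still holds on its page */
	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

The drain step matters: __page_frag_cache_drain() drops the pagecnt_bias references the cache still holds on its current page, which is exactly what both cleanup paths in the diff below add.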
@@ -322,19 +322,13 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 	int offset = q->buf_offset;
 	int idx;
-	void *(*alloc)(unsigned int fragsz);
-
-	if (napi)
-		alloc = napi_alloc_frag;
-	else
-		alloc = netdev_alloc_frag;
 
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf;
 
-		buf = alloc(q->buf_size);
+		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
@@ -361,6 +355,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
 static void
 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 {
+	struct page *page;
 	void *buf;
 	bool more;
 
@@ -373,6 +368,13 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 		skb_free_frag(buf);
 	} while (1);
 	spin_unlock_bh(&q->lock);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void
...
@@ -121,6 +121,7 @@ struct mt76_queue {
 
 	dma_addr_t desc_dma;
 	struct sk_buff *rx_head;
+	struct page_frag_cache rx_page;
 };
 
 struct mt76_mcu_ops {
...
@@ -275,6 +275,7 @@ static int
 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
 		 int nsgs, int len, int sglen)
 {
+	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
 	struct urb *urb = buf->urb;
 	int i;
 
@@ -283,7 +284,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
 		void *data;
 		int offset;
 
-		data = netdev_alloc_frag(len);
+		data = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
 		if (!data)
 			break;
 
@@ -550,10 +551,18 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
 static void mt76u_free_rx(struct mt76_dev *dev)
 {
 	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+	struct page *page;
 	int i;
 
 	for (i = 0; i < q->ndesc; i++)
 		mt76u_buf_free(&q->entry[i].ubuf);
+
+	if (!q->rx_page.va)
+		return;
+
+	page = virt_to_page(q->rx_page.va);
+	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+	memset(&q->rx_page, 0, sizeof(q->rx_page));
 }
 
 static void mt76u_stop_rx(struct mt76_dev *dev)
...