Commit c30e0b9b authored by Felix Fietkau, committed by Jakub Kicinski

net: ethernet: mtk_eth_soc: increase tx ring size for QDMA devices

In order to use the hardware traffic shaper feature, a larger tx ring is
needed, especially for the scratch ring, which the hardware shaper uses to
reorder packets.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20221116080734.44013-2-nbd@nbd.name
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent dbc4af76
...@@ -938,7 +938,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) ...@@ -938,7 +938,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
{ {
const struct mtk_soc_data *soc = eth->soc; const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail; dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE; int cnt = MTK_QDMA_RING_SIZE;
dma_addr_t dma_addr; dma_addr_t dma_addr;
int i; int i;
...@@ -2208,19 +2208,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ...@@ -2208,19 +2208,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
struct mtk_tx_ring *ring = &eth->tx_ring; struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size; int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd; struct mtk_tx_dma_v2 *txd;
int ring_size;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
ring_size = MTK_DMA_SIZE;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL); GFP_KERNEL);
if (!ring->buf) if (!ring->buf)
goto no_tx_mem; goto no_tx_mem;
ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL); &ring->phys, GFP_KERNEL);
if (!ring->dma) if (!ring->dma)
goto no_tx_mem; goto no_tx_mem;
for (i = 0; i < MTK_DMA_SIZE; i++) { for (i = 0; i < ring_size; i++) {
int next = (i + 1) % MTK_DMA_SIZE; int next = (i + 1) % ring_size;
u32 next_ptr = ring->phys + next * sz; u32 next_ptr = ring->phys + next * sz;
txd = ring->dma + i * sz; txd = ring->dma + i * sz;
...@@ -2240,22 +2246,22 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ...@@ -2240,22 +2246,22 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
* descriptors in ring->dma_pdma. * descriptors in ring->dma_pdma.
*/ */
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys_pdma, GFP_KERNEL); &ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma) if (!ring->dma_pdma)
goto no_tx_mem; goto no_tx_mem;
for (i = 0; i < MTK_DMA_SIZE; i++) { for (i = 0; i < ring_size; i++) {
ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
ring->dma_pdma[i].txd4 = 0; ring->dma_pdma[i].txd4 = 0;
} }
} }
ring->dma_size = MTK_DMA_SIZE; ring->dma_size = ring_size;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); atomic_set(&ring->free_count, ring_size - 2);
ring->next_free = ring->dma; ring->next_free = ring->dma;
ring->last_free = (void *)txd; ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
ring->thresh = MAX_SKB_FRAGS; ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we /* make sure that all changes to the dma ring are flushed before we
...@@ -2267,14 +2273,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ...@@ -2267,14 +2273,14 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth, mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz), ring->phys + ((ring_size - 1) * sz),
soc->reg_map->qdma.crx_ptr); soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
soc->reg_map->qdma.qtx_cfg); soc->reg_map->qdma.qtx_cfg);
} else { } else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
} }
...@@ -2292,7 +2298,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) ...@@ -2292,7 +2298,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
int i; int i;
if (ring->buf) { if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++) for (i = 0; i < ring->dma_size; i++)
mtk_tx_unmap(eth, &ring->buf[i], NULL, false); mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf); kfree(ring->buf);
ring->buf = NULL; ring->buf = NULL;
...@@ -2300,14 +2306,14 @@ static void mtk_tx_clean(struct mtk_eth *eth) ...@@ -2300,14 +2306,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->dma) { if (ring->dma) {
dma_free_coherent(eth->dma_dev, dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * soc->txrx.txd_size, ring->dma_size * soc->txrx.txd_size,
ring->dma, ring->phys); ring->dma, ring->phys);
ring->dma = NULL; ring->dma = NULL;
} }
if (ring->dma_pdma) { if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev, dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * soc->txrx.txd_size, ring->dma_size * soc->txrx.txd_size,
ring->dma_pdma, ring->phys_pdma); ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL; ring->dma_pdma = NULL;
} }
...@@ -2842,7 +2848,7 @@ static void mtk_dma_free(struct mtk_eth *eth) ...@@ -2842,7 +2848,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]); netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) { if (eth->scratch_ring) {
dma_free_coherent(eth->dma_dev, dma_free_coherent(eth->dma_dev,
MTK_DMA_SIZE * soc->txrx.txd_size, MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring); eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL; eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0; eth->phy_scratch_ring = 0;
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#define MTK_MAX_RX_LENGTH_2K 2048 #define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512 #define MTK_DMA_SIZE 512
#define MTK_MAC_COUNT 2 #define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment