Commit c4fd06c2 authored by Lorenzo Bianconi, committed by David S. Miller

net: ethernet: mtk_eth_soc: rely on txd_size in mtk_desc_to_tx_buf

This is a preliminary patch to add mt7986 ethernet support.
Tested-by: Sam Shih <sam.shih@mediatek.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0e05744b
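
The core of the change is the index computation in mtk_desc_to_tx_buf(): once different SoCs can use differently sized TX descriptors, pointer subtraction on a fixed struct mtk_tx_dma type no longer yields the right ring index, so the index is derived from the byte offset divided by the runtime txd_size instead. The stand-alone sketch below is not from the patch; the descriptor layout, sizes, and names are illustrative only, and it simply contrasts the two computations.

/* Stand-alone illustration (hypothetical sizes, not kernel code): why the
 * ring index must be computed from the byte offset and the runtime
 * descriptor size once descriptors can be larger than the legacy layout.
 */
#include <stdio.h>
#include <stdint.h>

struct txd_v1 { uint32_t txd1, txd2, txd3, txd4; };	/* legacy 16-byte descriptor */

int main(void)
{
	const long txd_size = 32;		/* assumed size of a newer, larger descriptor */
	uint8_t ring[4 * 32];			/* a ring of four such descriptors */
	uint8_t *desc = ring + 2 * txd_size;	/* pointer to the third descriptor */

	/* Fixed-type pointer arithmetic assumes 16-byte entries: yields 4, the wrong index. */
	long wrong = (struct txd_v1 *)desc - (struct txd_v1 *)ring;

	/* Byte offset divided by the runtime descriptor size: yields 2, the correct index. */
	long right = (desc - ring) / txd_size;

	printf("wrong=%ld right=%ld\n", wrong, right);
	return 0;
}

The same division appears in the patch itself as ((void *)txd - (void *)ring->dma) / txd_size, so the helper keeps working regardless of which descriptor layout the SoC provides.
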
@@ -861,10 +861,11 @@ static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
 	return ret + (desc - ring->phys);
 }
 
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-						    struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+					     struct mtk_tx_dma *txd,
+					     u32 txd_size)
 {
-	int idx = txd - ring->dma;
+	int idx = ((void *)txd - (void *)ring->dma) / txd_size;
 
 	return &ring->buf[idx];
 }
@@ -986,6 +987,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	};
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
+	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
 	struct mtk_tx_buf *itx_buf, *tx_buf;
@@ -997,7 +999,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (itxd == ring->last_free)
 		return -ENOMEM;
 
-	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));
 
 	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1025,7 +1027,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	while (frag_size) {
 		bool new_desc = true;
 
-		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+		if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
 		    (i & 0x1)) {
 			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 			txd_pdma = qdma_to_pdma(ring, txd);
@@ -1049,7 +1051,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		mtk_tx_set_dma_desc(dev, txd, &txd_info);
 
-		tx_buf = mtk_desc_to_tx_buf(ring, txd);
+		tx_buf = mtk_desc_to_tx_buf(ring, txd,
+					    soc->txrx.txd_size);
 		if (new_desc)
 			memset(tx_buf, 0, sizeof(*tx_buf));
 		tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -1068,7 +1071,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	/* store skb to cleanup */
 	itx_buf->skb = skb;
 
-	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (k & 0x1)
 			txd_pdma->txd2 |= TX_DMA_LS0;
 		else
@@ -1086,7 +1089,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	 */
 	wmb();
 
-	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
 		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
 		    !netdev_xmit_more())
 			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
@@ -1100,13 +1103,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 err_dma:
 	do {
-		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+		tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 
 		/* unmap dma */
 		mtk_tx_unmap(eth, tx_buf, false);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+		if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
 
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1417,7 +1420,8 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
 			break;
 
-		tx_buf = mtk_desc_to_tx_buf(ring, desc);
+		tx_buf = mtk_desc_to_tx_buf(ring, desc,
+					    eth->soc->txrx.txd_size);
 		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
 			mac = 1;
 