Commit 10151339 authored by David S. Miller

Merge branch 'stmmac-Clean-up-and-tune-up'

Jose Abreu says:

====================
net: stmmac: Clean-up and tune-up

This series aims to make the handling of the different GMAC versions in
stmmac_main.c uniform and to tune up the HW.

Currently there are some if/else conditions in the main source file that
call different callbacks depending on the GMAC ID.

With the introduction of a generic HW interface layer that automatically
selects the GMAC callbacks to be used, it is no longer desirable to have if
conditions in the main code, because it should be completely agnostic of the
GMAC version.

This series removes most of these conditions. A few if conditions remain
untouched, but the callback handling is now uniform.

Tested on GMAC5; I hope I didn't break any previous versions.

Please check [1] for a performance analysis of patches 3-12.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 538e2de1 61fac60a
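The cover letter above refers to the hwif layer's automatic callback selection. As a rough, stand-alone illustration of that dispatch pattern only (the names below, such as demo_dma_ops, gmac_a_rx_mode and demo_dma_rx_mode, are simplified stand-ins and not the driver's real API), a table of per-core function pointers is chosen once and the common code calls generic wrappers that never test the core ID:

/* Minimal sketch of callback-table dispatch, assuming simplified stand-in
 * names; this is not the stmmac driver's actual API.
 */
#include <stdio.h>

struct demo_dma_ops {                      /* stand-in for a per-core ops table */
        void (*dma_rx_mode)(int mode, unsigned int chan);
        void (*dma_tx_mode)(int mode, unsigned int chan);
};

static void gmac_a_rx_mode(int mode, unsigned int chan)
{
        printf("core A: RX chan %u -> mode %d\n", chan, mode);
}

static void gmac_a_tx_mode(int mode, unsigned int chan)
{
        printf("core A: TX chan %u -> mode %d\n", chan, mode);
}

static const struct demo_dma_ops gmac_a_dma_ops = {
        .dma_rx_mode = gmac_a_rx_mode,
        .dma_tx_mode = gmac_a_tx_mode,
};

/* A second core implements only the TX hook. */
static void gmac_b_tx_mode(int mode, unsigned int chan)
{
        printf("core B: TX chan %u -> mode %d\n", chan, mode);
}

static const struct demo_dma_ops gmac_b_dma_ops = {
        .dma_tx_mode = gmac_b_tx_mode,
};

/* Generic wrappers: call through the table, silently skip missing hooks. */
static void demo_dma_rx_mode(const struct demo_dma_ops *ops, int mode,
                             unsigned int chan)
{
        if (ops->dma_rx_mode)
                ops->dma_rx_mode(mode, chan);
}

static void demo_dma_tx_mode(const struct demo_dma_ops *ops, int mode,
                             unsigned int chan)
{
        if (ops->dma_tx_mode)
                ops->dma_tx_mode(mode, chan);
}

int main(void)
{
        /* "Probe" picks one table by core ID; the rest of the code is agnostic. */
        int core_id = 4;
        const struct demo_dma_ops *ops = (core_id >= 4) ? &gmac_a_dma_ops
                                                        : &gmac_b_dma_ops;
        unsigned int chan;

        for (chan = 0; chan < 2; chan++) {
                demo_dma_rx_mode(ops, 1, chan);
                demo_dma_tx_mode(ops, 1, chan);
        }
        return 0;
}

This is also why, in the diff below, dwmac100 can register only a dma_tx_mode hook while dwmac1000 and newer register both dma_rx_mode and dma_tx_mode: the generic wrappers simply skip hooks a core does not provide.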
@@ -276,17 +276,28 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
  * Called from stmmac via stmmac_dma_ops->init
  */
 static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
-                                 struct stmmac_dma_cfg *dma_cfg,
-                                 u32 dma_tx, u32 dma_rx, int atds)
+                                 struct stmmac_dma_cfg *dma_cfg, int atds)
 {
-        /* Write TX and RX descriptors address */
-        writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST);
-        writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST);
-
         writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
         writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
 }
 
+static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
+                                    struct stmmac_dma_cfg *dma_cfg,
+                                    u32 dma_rx_phy, u32 chan)
+{
+        /* Write RX descriptors address */
+        writel(dma_rx_phy, ioaddr + EMAC_RX_DESC_LIST);
+}
+
+static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
+                                    struct stmmac_dma_cfg *dma_cfg,
+                                    u32 dma_tx_phy, u32 chan)
+{
+        /* Write TX descriptors address */
+        writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST);
+}
+
 /* sun8i_dwmac_dump_regs() - Dump EMAC address space
  * Called from stmmac_dma_ops->dump_regs
  * Used for ethtool
@@ -437,13 +448,36 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
         return ret;
 }
 
-static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
-                                           int rxmode, int rxfifosz)
+static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+                                              u32 channel, int fifosz, u8 qmode)
+{
+        u32 v;
+
+        v = readl(ioaddr + EMAC_RX_CTL1);
+        if (mode == SF_DMA_MODE) {
+                v |= EMAC_RX_MD;
+        } else {
+                v &= ~EMAC_RX_MD;
+                v &= ~EMAC_RX_TH_MASK;
+                if (mode < 32)
+                        v |= EMAC_RX_TH_32;
+                else if (mode < 64)
+                        v |= EMAC_RX_TH_64;
+                else if (mode < 96)
+                        v |= EMAC_RX_TH_96;
+                else if (mode < 128)
+                        v |= EMAC_RX_TH_128;
+        }
+        writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+                                              u32 channel, int fifosz, u8 qmode)
 {
         u32 v;
 
         v = readl(ioaddr + EMAC_TX_CTL1);
-        if (txmode == SF_DMA_MODE) {
+        if (mode == SF_DMA_MODE) {
                 v |= EMAC_TX_MD;
                 /* Undocumented bit (called TX_NEXT_FRM in BSP), the original
                  * comment is
@@ -454,40 +488,26 @@ static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode,
         } else {
                 v &= ~EMAC_TX_MD;
                 v &= ~EMAC_TX_TH_MASK;
-                if (txmode < 64)
+                if (mode < 64)
                         v |= EMAC_TX_TH_64;
-                else if (txmode < 128)
+                else if (mode < 128)
                         v |= EMAC_TX_TH_128;
-                else if (txmode < 192)
+                else if (mode < 192)
                         v |= EMAC_TX_TH_192;
-                else if (txmode < 256)
+                else if (mode < 256)
                         v |= EMAC_TX_TH_256;
         }
         writel(v, ioaddr + EMAC_TX_CTL1);
-
-        v = readl(ioaddr + EMAC_RX_CTL1);
-        if (rxmode == SF_DMA_MODE) {
-                v |= EMAC_RX_MD;
-        } else {
-                v &= ~EMAC_RX_MD;
-                v &= ~EMAC_RX_TH_MASK;
-                if (rxmode < 32)
-                        v |= EMAC_RX_TH_32;
-                else if (rxmode < 64)
-                        v |= EMAC_RX_TH_64;
-                else if (rxmode < 96)
-                        v |= EMAC_RX_TH_96;
-                else if (rxmode < 128)
-                        v |= EMAC_RX_TH_128;
-        }
-        writel(v, ioaddr + EMAC_RX_CTL1);
 }
 
 static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
         .reset = sun8i_dwmac_dma_reset,
         .init = sun8i_dwmac_dma_init,
+        .init_rx_chan = sun8i_dwmac_dma_init_rx,
+        .init_tx_chan = sun8i_dwmac_dma_init_tx,
         .dump_regs = sun8i_dwmac_dump_regs,
-        .dma_mode = sun8i_dwmac_dma_operation_mode,
+        .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx,
+        .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx,
         .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
         .enable_dma_irq = sun8i_dwmac_enable_dma_irq,
         .disable_dma_irq = sun8i_dwmac_disable_dma_irq,
...
@@ -81,8 +81,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
 }
 
 static void dwmac1000_dma_init(void __iomem *ioaddr,
-                               struct stmmac_dma_cfg *dma_cfg,
-                               u32 dma_tx, u32 dma_rx, int atds)
+                               struct stmmac_dma_cfg *dma_cfg, int atds)
 {
         u32 value = readl(ioaddr + DMA_BUS_MODE);
         int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
@@ -119,12 +118,22 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
 
         /* Mask interrupts by writing to CSR7 */
         writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
 
-        /* RX/TX descriptor base address lists must be written into
-         * DMA CSR3 and CSR4, respectively
-         */
-        writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
-        writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
+                                  struct stmmac_dma_cfg *dma_cfg,
+                                  u32 dma_rx_phy, u32 chan)
+{
+        /* RX descriptor base address list must be written into DMA CSR3 */
+        writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
+                                  struct stmmac_dma_cfg *dma_cfg,
+                                  u32 dma_tx_phy, u32 chan)
+{
+        /* TX descriptor base address list must be written into DMA CSR4 */
+        writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
 }
 
 static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -148,12 +157,40 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
         return csr6;
 }
 
-static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
-                                         int rxmode, int rxfifosz)
+static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+                                            u32 channel, int fifosz, u8 qmode)
+{
+        u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+        if (mode == SF_DMA_MODE) {
+                pr_debug("GMAC: enable RX store and forward mode\n");
+                csr6 |= DMA_CONTROL_RSF;
+        } else {
+                pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+                csr6 &= ~DMA_CONTROL_RSF;
+                csr6 &= DMA_CONTROL_TC_RX_MASK;
+                if (mode <= 32)
+                        csr6 |= DMA_CONTROL_RTC_32;
+                else if (mode <= 64)
+                        csr6 |= DMA_CONTROL_RTC_64;
+                else if (mode <= 96)
+                        csr6 |= DMA_CONTROL_RTC_96;
+                else
+                        csr6 |= DMA_CONTROL_RTC_128;
+        }
+
+        /* Configure flow control based on rx fifo size */
+        csr6 = dwmac1000_configure_fc(csr6, fifosz);
+
+        writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+                                            u32 channel, int fifosz, u8 qmode)
 {
         u32 csr6 = readl(ioaddr + DMA_CONTROL);
 
-        if (txmode == SF_DMA_MODE) {
+        if (mode == SF_DMA_MODE) {
                 pr_debug("GMAC: enable TX store and forward mode\n");
                 /* Transmit COE type 2 cannot be done in cut-through mode. */
                 csr6 |= DMA_CONTROL_TSF;
@@ -162,42 +199,22 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
                  */
                 csr6 |= DMA_CONTROL_OSF;
         } else {
-                pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+                pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
                 csr6 &= ~DMA_CONTROL_TSF;
                 csr6 &= DMA_CONTROL_TC_TX_MASK;
                 /* Set the transmit threshold */
-                if (txmode <= 32)
+                if (mode <= 32)
                         csr6 |= DMA_CONTROL_TTC_32;
-                else if (txmode <= 64)
+                else if (mode <= 64)
                         csr6 |= DMA_CONTROL_TTC_64;
-                else if (txmode <= 128)
+                else if (mode <= 128)
                         csr6 |= DMA_CONTROL_TTC_128;
-                else if (txmode <= 192)
+                else if (mode <= 192)
                         csr6 |= DMA_CONTROL_TTC_192;
                 else
                         csr6 |= DMA_CONTROL_TTC_256;
         }
 
-        if (rxmode == SF_DMA_MODE) {
-                pr_debug("GMAC: enable RX store and forward mode\n");
-                csr6 |= DMA_CONTROL_RSF;
-        } else {
-                pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
-                csr6 &= ~DMA_CONTROL_RSF;
-                csr6 &= DMA_CONTROL_TC_RX_MASK;
-                if (rxmode <= 32)
-                        csr6 |= DMA_CONTROL_RTC_32;
-                else if (rxmode <= 64)
-                        csr6 |= DMA_CONTROL_RTC_64;
-                else if (rxmode <= 96)
-                        csr6 |= DMA_CONTROL_RTC_96;
-                else
-                        csr6 |= DMA_CONTROL_RTC_128;
-        }
-
-        /* Configure flow control based on rx fifo size */
-        csr6 = dwmac1000_configure_fc(csr6, rxfifosz);
-
         writel(csr6, ioaddr + DMA_CONTROL);
 }
 
@@ -256,9 +273,12 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
 const struct stmmac_dma_ops dwmac1000_dma_ops = {
         .reset = dwmac_dma_reset,
         .init = dwmac1000_dma_init,
+        .init_rx_chan = dwmac1000_dma_init_rx,
+        .init_tx_chan = dwmac1000_dma_init_tx,
         .axi = dwmac1000_dma_axi,
         .dump_regs = dwmac1000_dump_dma_regs,
-        .dma_mode = dwmac1000_dma_operation_mode,
+        .dma_rx_mode = dwmac1000_dma_operation_mode_rx,
+        .dma_tx_mode = dwmac1000_dma_operation_mode_tx,
         .enable_dma_transmission = dwmac_enable_dma_transmission,
         .enable_dma_irq = dwmac_enable_dma_irq,
         .disable_dma_irq = dwmac_disable_dma_irq,
...
@@ -29,8 +29,7 @@
 #include "dwmac_dma.h"
 
 static void dwmac100_dma_init(void __iomem *ioaddr,
-                              struct stmmac_dma_cfg *dma_cfg,
-                              u32 dma_tx, u32 dma_rx, int atds)
+                              struct stmmac_dma_cfg *dma_cfg, int atds)
 {
         /* Enable Application Access by writing to DMA CSR0 */
         writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -38,12 +37,22 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
 
         /* Mask interrupts by writing to CSR7 */
         writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
 
-        /* RX/TX descriptor base addr lists must be written into
-         * DMA CSR3 and CSR4, respectively
-         */
-        writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
-        writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+static void dwmac100_dma_init_rx(void __iomem *ioaddr,
+                                 struct stmmac_dma_cfg *dma_cfg,
+                                 u32 dma_rx_phy, u32 chan)
+{
+        /* RX descriptor base addr lists must be written into DMA CSR3 */
+        writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac100_dma_init_tx(void __iomem *ioaddr,
+                                 struct stmmac_dma_cfg *dma_cfg,
+                                 u32 dma_tx_phy, u32 chan)
+{
+        /* TX descriptor base addr lists must be written into DMA CSR4 */
+        writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR);
 }
 
 /* Store and Forward capability is not used at all.
@@ -51,14 +60,14 @@ static void dwmac100_dma_init(void __iomem *ioaddr,
  * The transmit threshold can be programmed by setting the TTC bits in the DMA
  * control register.
  */
-static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
-                                        int rxmode, int rxfifosz)
+static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+                                           u32 channel, int fifosz, u8 qmode)
 {
         u32 csr6 = readl(ioaddr + DMA_CONTROL);
 
-        if (txmode <= 32)
+        if (mode <= 32)
                 csr6 |= DMA_CONTROL_TTC_32;
-        else if (txmode <= 64)
+        else if (mode <= 64)
                 csr6 |= DMA_CONTROL_TTC_64;
         else
                 csr6 |= DMA_CONTROL_TTC_128;
@@ -112,8 +121,10 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
 const struct stmmac_dma_ops dwmac100_dma_ops = {
         .reset = dwmac_dma_reset,
         .init = dwmac100_dma_init,
+        .init_rx_chan = dwmac100_dma_init_rx,
+        .init_tx_chan = dwmac100_dma_init_tx,
         .dump_regs = dwmac100_dump_dma_regs,
-        .dma_mode = dwmac100_dma_operation_mode,
+        .dma_tx_mode = dwmac100_dma_operation_mode_tx,
         .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
         .enable_dma_transmission = dwmac_enable_dma_transmission,
         .enable_dma_irq = dwmac_enable_dma_irq,
...
@@ -189,9 +189,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p)
         p->des3 |= cpu_to_le32(TDES3_OWN);
 }
 
-static void dwmac4_set_rx_owner(struct dma_desc *p)
+static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 {
-        p->des3 |= cpu_to_le32(RDES3_OWN);
+        p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+
+        if (!disable_rx_ic)
+                p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
 }
 
 static int dwmac4_get_tx_ls(struct dma_desc *p)
@@ -292,10 +295,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                    int mode, int end)
 {
-        p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
-
-        if (!disable_rx_ic)
-                p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+        dwmac4_set_rx_owner(p, disable_rx_ic);
 }
 
 static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
@@ -424,6 +424,25 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
         p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
 }
 
+static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+        *addr = le32_to_cpu(p->des0);
+}
+
+static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+        p->des0 = cpu_to_le32(addr);
+        p->des1 = 0;
+}
+
+static void dwmac4_clear(struct dma_desc *p)
+{
+        p->des0 = 0;
+        p->des1 = 0;
+        p->des2 = 0;
+        p->des3 = 0;
+}
+
 const struct stmmac_desc_ops dwmac4_desc_ops = {
         .tx_status = dwmac4_wrback_get_tx_status,
         .rx_status = dwmac4_wrback_get_rx_status,
@@ -445,6 +464,9 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
         .init_tx_desc = dwmac4_rd_init_tx_desc,
         .display_ring = dwmac4_display_ring,
         .set_mss = dwmac4_set_mss_ctxt,
+        .get_addr = dwmac4_get_addr,
+        .set_addr = dwmac4_set_addr,
+        .clear = dwmac4_clear,
 };
 
 const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
...
@@ -94,6 +94,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
 
         value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
         value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+
+        /* Enable OSP to get best performance */
+        value |= DMA_CONTROL_OSP;
+
         writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
 
         writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
@@ -116,8 +120,7 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr,
 }
 
 static void dwmac4_dma_init(void __iomem *ioaddr,
-                            struct stmmac_dma_cfg *dma_cfg,
-                            u32 dma_tx, u32 dma_rx, int atds)
+                            struct stmmac_dma_cfg *dma_cfg, int atds)
 {
         u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
...
@@ -184,7 +184,6 @@
 #define DMA_CHAN0_DBG_STAT_RPS_SHIFT	8
 
 int dwmac4_dma_reset(void __iomem *ioaddr);
-void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
 void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
 void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
 void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
...
@@ -292,7 +292,7 @@ static void enh_desc_set_tx_owner(struct dma_desc *p)
         p->des0 |= cpu_to_le32(ETDES0_OWN);
 }
 
-static void enh_desc_set_rx_owner(struct dma_desc *p)
+static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 {
         p->des0 |= cpu_to_le32(RDES0_OWN);
 }
@@ -437,6 +437,21 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
         pr_info("\n");
 }
 
+static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+        *addr = le32_to_cpu(p->des2);
+}
+
+static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+        p->des2 = cpu_to_le32(addr);
+}
+
+static void enh_desc_clear(struct dma_desc *p)
+{
+        p->des2 = 0;
+}
+
 const struct stmmac_desc_ops enh_desc_ops = {
         .tx_status = enh_desc_get_tx_status,
         .rx_status = enh_desc_get_rx_status,
@@ -457,4 +472,7 @@ const struct stmmac_desc_ops enh_desc_ops = {
         .get_timestamp = enh_desc_get_timestamp,
         .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
         .display_ring = enh_desc_display_ring,
+        .get_addr = enh_desc_get_addr,
+        .set_addr = enh_desc_set_addr,
+        .clear = enh_desc_clear,
 };
...
@@ -6,6 +6,7 @@
 
 #include "common.h"
 #include "stmmac.h"
+#include "stmmac_ptp.h"
 
 static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
 {
@@ -72,6 +73,7 @@ static const struct stmmac_hwif_entry {
         bool gmac;
         bool gmac4;
         u32 min_id;
+        const struct stmmac_regs_off regs;
         const void *desc;
         const void *dma;
         const void *mac;
@@ -86,6 +88,10 @@ static const struct stmmac_hwif_entry {
                 .gmac = false,
                 .gmac4 = false,
                 .min_id = 0,
+                .regs = {
+                        .ptp_off = PTP_GMAC3_X_OFFSET,
+                        .mmc_off = MMC_GMAC3_X_OFFSET,
+                },
                 .desc = NULL,
                 .dma = &dwmac100_dma_ops,
                 .mac = &dwmac100_ops,
@@ -98,6 +104,10 @@ static const struct stmmac_hwif_entry {
                 .gmac = true,
                 .gmac4 = false,
                 .min_id = 0,
+                .regs = {
+                        .ptp_off = PTP_GMAC3_X_OFFSET,
+                        .mmc_off = MMC_GMAC3_X_OFFSET,
+                },
                 .desc = NULL,
                 .dma = &dwmac1000_dma_ops,
                 .mac = &dwmac1000_ops,
@@ -110,6 +120,10 @@ static const struct stmmac_hwif_entry {
                 .gmac = false,
                 .gmac4 = true,
                 .min_id = 0,
+                .regs = {
+                        .ptp_off = PTP_GMAC4_OFFSET,
+                        .mmc_off = MMC_GMAC4_OFFSET,
+                },
                 .desc = &dwmac4_desc_ops,
                 .dma = &dwmac4_dma_ops,
                 .mac = &dwmac4_ops,
@@ -122,6 +136,10 @@ static const struct stmmac_hwif_entry {
                 .gmac = false,
                 .gmac4 = true,
                 .min_id = DWMAC_CORE_4_00,
+                .regs = {
+                        .ptp_off = PTP_GMAC4_OFFSET,
+                        .mmc_off = MMC_GMAC4_OFFSET,
+                },
                 .desc = &dwmac4_desc_ops,
                 .dma = &dwmac4_dma_ops,
                 .mac = &dwmac410_ops,
@@ -134,6 +152,10 @@ static const struct stmmac_hwif_entry {
                 .gmac = false,
                 .gmac4 = true,
                 .min_id = DWMAC_CORE_4_10,
+                .regs = {
+                        .ptp_off = PTP_GMAC4_OFFSET,
+                        .mmc_off = MMC_GMAC4_OFFSET,
+                },
                 .desc = &dwmac4_desc_ops,
                 .dma = &dwmac410_dma_ops,
                 .mac = &dwmac410_ops,
@@ -146,6 +168,10 @@ static const struct stmmac_hwif_entry {
                 .gmac = false,
                 .gmac4 = true,
                 .min_id = DWMAC_CORE_5_10,
+                .regs = {
+                        .ptp_off = PTP_GMAC4_OFFSET,
+                        .mmc_off = MMC_GMAC4_OFFSET,
+                },
                 .desc = &dwmac4_desc_ops,
                 .dma = &dwmac410_dma_ops,
                 .mac = &dwmac510_ops,
@@ -175,6 +201,12 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
         /* Save ID for later use */
         priv->synopsys_id = id;
 
+        /* Lets assume some safe values first */
+        priv->ptpaddr = priv->ioaddr +
+                        (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
+        priv->mmcaddr = priv->ioaddr +
+                        (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+
         /* Check for HW specific setup first */
         if (priv->plat->setup) {
                 priv->hw = priv->plat->setup(priv);
@@ -206,6 +238,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
                 mac->tc = entry->tc;
 
                 priv->hw = mac;
+                priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+                priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
 
                 /* Entry found */
                 ret = entry->setup(priv);
...
@@ -59,7 +59,7 @@ struct stmmac_desc_ops {
         /* Get the buffer size from the descriptor */
         int (*get_tx_len)(struct dma_desc *p);
         /* Handle extra events on specific interrupts hw dependent */
-        void (*set_rx_owner)(struct dma_desc *p);
+        void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
         /* Get the receive frame size */
         int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
         /* Return the reception status looking at the RDES1 */
@@ -79,6 +79,12 @@ struct stmmac_desc_ops {
         void (*display_ring)(void *head, unsigned int size, bool rx);
         /* set MSS via context descriptor */
         void (*set_mss)(struct dma_desc *p, unsigned int mss);
+        /* get descriptor skbuff address */
+        void (*get_addr)(struct dma_desc *p, unsigned int *addr);
+        /* set descriptor skbuff address */
+        void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
+        /* clear descriptor */
+        void (*clear)(struct dma_desc *p);
 };
 
 #define stmmac_init_rx_desc(__priv, __args...) \
@@ -123,6 +129,12 @@ struct stmmac_desc_ops {
         stmmac_do_void_callback(__priv, desc, display_ring, __args)
 #define stmmac_set_mss(__priv, __args...) \
         stmmac_do_void_callback(__priv, desc, set_mss, __args)
+#define stmmac_get_desc_addr(__priv, __args...) \
+        stmmac_do_void_callback(__priv, desc, get_addr, __args)
+#define stmmac_set_desc_addr(__priv, __args...) \
+        stmmac_do_void_callback(__priv, desc, set_addr, __args)
+#define stmmac_clear_desc(__priv, __args...) \
+        stmmac_do_void_callback(__priv, desc, clear, __args)
 
 struct stmmac_dma_cfg;
 struct dma_features;
@@ -132,7 +144,7 @@ struct stmmac_dma_ops {
         /* DMA core initialization */
         int (*reset)(void __iomem *ioaddr);
         void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
-                     u32 dma_tx, u32 dma_rx, int atds);
+                     int atds);
         void (*init_chan)(void __iomem *ioaddr,
                           struct stmmac_dma_cfg *dma_cfg, u32 chan);
         void (*init_rx_chan)(void __iomem *ioaddr,
@@ -145,10 +157,6 @@ struct stmmac_dma_ops {
         void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
         /* Dump DMA registers */
         void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
-        /* Set tx/rx threshold in the csr6 register
-         * An invalid value enables the store-and-forward mode */
-        void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
-                         int rxfifosz);
         void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
                             int fifosz, u8 qmode);
         void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
@@ -191,8 +199,6 @@ struct stmmac_dma_ops {
         stmmac_do_void_callback(__priv, dma, axi, __args)
 #define stmmac_dump_dma_regs(__priv, __args...) \
         stmmac_do_void_callback(__priv, dma, dump_regs, __args)
-#define stmmac_dma_mode(__priv, __args...) \
-        stmmac_do_void_callback(__priv, dma, dma_mode, __args)
 #define stmmac_dma_rx_mode(__priv, __args...) \
         stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args)
 #define stmmac_dma_tx_mode(__priv, __args...) \
@@ -440,6 +446,11 @@ struct stmmac_tc_ops {
 #define stmmac_tc_setup_cls_u32(__priv, __args...) \
         stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
 
+struct stmmac_regs_off {
+        u32 ptp_off;
+        u32 mmc_off;
+};
+
 extern const struct stmmac_ops dwmac100_ops;
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
 extern const struct stmmac_ops dwmac1000_ops;
...
@@ -168,7 +168,7 @@ static void ndesc_set_tx_owner(struct dma_desc *p)
         p->des0 |= cpu_to_le32(TDES0_OWN);
 }
 
-static void ndesc_set_rx_owner(struct dma_desc *p)
+static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
 {
         p->des0 |= cpu_to_le32(RDES0_OWN);
 }
@@ -297,6 +297,21 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
         pr_info("\n");
 }
 
+static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
+{
+        *addr = le32_to_cpu(p->des2);
+}
+
+static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+        p->des2 = cpu_to_le32(addr);
+}
+
+static void ndesc_clear(struct dma_desc *p)
+{
+        p->des2 = 0;
+}
+
 const struct stmmac_desc_ops ndesc_ops = {
         .tx_status = ndesc_get_tx_status,
         .rx_status = ndesc_get_rx_status,
@@ -316,4 +331,7 @@ const struct stmmac_desc_ops ndesc_ops = {
         .get_timestamp = ndesc_get_timestamp,
         .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
         .display_ring = ndesc_display_ring,
+        .get_addr = ndesc_get_addr,
+        .set_addr = ndesc_set_addr,
+        .clear = ndesc_clear,
 };
...
@@ -105,6 +105,7 @@ struct stmmac_priv {
         u32 tx_count_frames;
         u32 tx_coal_frames;
         u32 tx_coal_timer;
+        bool tx_timer_armed;
         int tx_coalesce;
         int hwts_tx_en;
...
@@ -1156,10 +1156,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                 return -EINVAL;
         }
 
-        if (priv->synopsys_id >= DWMAC_CORE_4_00)
-                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
-        else
-                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
+        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
 
         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
                 stmmac_init_desc3(priv, p);
@@ -1344,14 +1341,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
                 else
                         p = tx_q->dma_tx + i;
 
-                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                        p->des0 = 0;
-                        p->des1 = 0;
-                        p->des2 = 0;
-                        p->des3 = 0;
-                } else {
-                        p->des2 = 0;
-                }
+                stmmac_clear_desc(priv, p);
 
                 tx_q->tx_skbuff_dma[i].buf = 0;
                 tx_q->tx_skbuff_dma[i].map_as_page = false;
@@ -1797,22 +1787,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
         }
 
         /* configure all channels */
-        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                for (chan = 0; chan < rx_channels_count; chan++) {
-                        qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+        for (chan = 0; chan < rx_channels_count; chan++) {
+                qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
 
-                        stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
-                                        rxfifosz, qmode);
-                }
+                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+                                   rxfifosz, qmode);
+        }
 
-                for (chan = 0; chan < tx_channels_count; chan++) {
-                        qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+        for (chan = 0; chan < tx_channels_count; chan++) {
+                qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
 
-                        stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
-                                        txfifosz, qmode);
-                }
-        } else {
-                stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
+                stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+                                   txfifosz, qmode);
         }
 }
@@ -1981,23 +1967,14 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
         rxfifosz /= rx_channels_count;
         txfifosz /= tx_channels_count;
 
-        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz,
-                                rxqmode);
-                stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz,
-                                txqmode);
-        } else {
-                stmmac_dma_mode(priv, priv->ioaddr, txmode, rxmode, rxfifosz);
-        }
+        stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+        stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
 }
 
 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
 {
-        int ret = false;
-
-        /* Safety features are only available in cores >= 5.10 */
-        if (priv->synopsys_id < DWMAC_CORE_5_10)
-                return ret;
+        int ret;
 
         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
         if (ret && (ret != -EINVAL)) {
@@ -2108,14 +2085,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
-                priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
-        } else {
-                priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
-                priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
-        }
-
         dwmac_mmc_intr_all_mask(priv->mmcaddr);
 
         if (priv->dma_cap.rmon) {
@@ -2169,10 +2138,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 {
         u32 rx_channels_count = priv->plat->rx_queues_to_use;
         u32 tx_channels_count = priv->plat->tx_queues_to_use;
+        u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
         struct stmmac_rx_queue *rx_q;
         struct stmmac_tx_queue *tx_q;
-        u32 dummy_dma_rx_phy = 0;
-        u32 dummy_dma_tx_phy = 0;
         u32 chan = 0;
         int atds = 0;
         int ret = 0;
@@ -2191,48 +2159,39 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
                 return ret;
         }
 
-        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                /* DMA Configuration */
-                stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
-                                dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
-
-                /* DMA RX Channel Configuration */
-                for (chan = 0; chan < rx_channels_count; chan++) {
-                        rx_q = &priv->rx_queue[chan];
-
-                        stmmac_init_rx_chan(priv, priv->ioaddr,
-                                            priv->plat->dma_cfg, rx_q->dma_rx_phy,
-                                            chan);
-
-                        rx_q->rx_tail_addr = rx_q->dma_rx_phy +
-                                    (DMA_RX_SIZE * sizeof(struct dma_desc));
-                        stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
-                                               rx_q->rx_tail_addr, chan);
-                }
-
-                /* DMA TX Channel Configuration */
-                for (chan = 0; chan < tx_channels_count; chan++) {
-                        tx_q = &priv->tx_queue[chan];
-
-                        stmmac_init_chan(priv, priv->ioaddr,
-                                         priv->plat->dma_cfg, chan);
-
-                        stmmac_init_tx_chan(priv, priv->ioaddr,
-                                            priv->plat->dma_cfg, tx_q->dma_tx_phy,
-                                            chan);
-
-                        tx_q->tx_tail_addr = tx_q->dma_tx_phy +
-                                    (DMA_TX_SIZE * sizeof(struct dma_desc));
-                        stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
-                                               tx_q->tx_tail_addr, chan);
-                }
-        } else {
-                rx_q = &priv->rx_queue[chan];
-                tx_q = &priv->tx_queue[chan];
-                stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg,
-                                tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
-        }
+        /* DMA RX Channel Configuration */
+        for (chan = 0; chan < rx_channels_count; chan++) {
+                rx_q = &priv->rx_queue[chan];
+
+                stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                    rx_q->dma_rx_phy, chan);
+
+                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+                            (DMA_RX_SIZE * sizeof(struct dma_desc));
+                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+                                       rx_q->rx_tail_addr, chan);
+        }
+
+        /* DMA TX Channel Configuration */
+        for (chan = 0; chan < tx_channels_count; chan++) {
+                tx_q = &priv->tx_queue[chan];
+
+                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+                                    tx_q->dma_tx_phy, chan);
+
+                tx_q->tx_tail_addr = tx_q->dma_tx_phy +
+                            (DMA_TX_SIZE * sizeof(struct dma_desc));
+                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+                                       tx_q->tx_tail_addr, chan);
+        }
+
+        /* DMA CSR Channel configuration */
+        for (chan = 0; chan < dma_csr_ch; chan++)
+                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+        /* DMA Configuration */
+        stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
 
         if (priv->plat->axi)
                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -2515,12 +2474,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
         stmmac_core_init(priv, priv->hw, dev);
 
         /* Initialize MTL*/
-        if (priv->synopsys_id >= DWMAC_CORE_4_00)
-                stmmac_mtl_configuration(priv);
+        stmmac_mtl_configuration(priv);
 
         /* Initialize Safety Features */
-        if (priv->synopsys_id >= DWMAC_CORE_5_10)
-                stmmac_safety_feat_configuration(priv);
+        stmmac_safety_feat_configuration(priv);
 
         ret = stmmac_rx_ipc(priv, priv->hw);
         if (!ret) {
@@ -3074,10 +3031,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         if (enh_desc)
                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
 
-        if (unlikely(is_jumbo) && likely(priv->synopsys_id <
-                                         DWMAC_CORE_4_00)) {
+        if (unlikely(is_jumbo)) {
                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
-                if (unlikely(entry < 0))
+                if (unlikely(entry < 0) && (entry != -EINVAL))
                         goto dma_map_err;
         }
@@ -3100,10 +3056,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                         goto dma_map_err; /* should reuse desc w/o issues */
 
                 tx_q->tx_skbuff_dma[entry].buf = des;
-                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                        desc->des0 = cpu_to_le32(des);
-                else
-                        desc->des2 = cpu_to_le32(des);
+
+                stmmac_set_desc_addr(priv, desc, des);
 
                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
                 tx_q->tx_skbuff_dma[entry].len = len;
@@ -3158,13 +3112,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
          * element in case of no SG.
          */
         priv->tx_count_frames += nfrags + 1;
-        if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+        if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+            !priv->tx_timer_armed) {
                 mod_timer(&priv->txtimer,
                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
+                priv->tx_timer_armed = true;
         } else {
                 priv->tx_count_frames = 0;
                 stmmac_set_tx_ic(priv, desc);
                 priv->xstats.tx_set_ic_bit++;
+                priv->tx_timer_armed = false;
         }
 
         skb_tx_timestamp(skb);
@@ -3182,10 +3139,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                         goto dma_map_err;
 
                 tx_q->tx_skbuff_dma[first_entry].buf = des;
-                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                        first->des0 = cpu_to_le32(des);
-                else
-                        first->des2 = cpu_to_le32(des);
+
+                stmmac_set_desc_addr(priv, first, des);
 
                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -3211,11 +3166,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
-        if (priv->synopsys_id < DWMAC_CORE_4_00)
-                stmmac_enable_dma_transmission(priv, priv->ioaddr);
-        else
-                stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
-                                queue);
+        stmmac_enable_dma_transmission(priv, priv->ioaddr);
+        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
         return NETDEV_TX_OK;
@@ -3299,13 +3251,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                                 break;
                         }
 
-                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
-                                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
-                                p->des1 = 0;
-                        } else {
-                                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
-                        }
-
+                        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
                         stmmac_refill_desc3(priv, rx_q, p);
 
                         if (rx_q->rx_zeroc_thresh > 0)
@@ -3316,10 +3262,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                 }
 
                 dma_wmb();
 
-                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                        stmmac_init_rx_desc(priv, p, priv->use_riwt, 0, 0);
-                else
-                        stmmac_set_rx_owner(priv, p);
+                stmmac_set_rx_owner(priv, p, priv->use_riwt);
 
                 dma_wmb();
@@ -3407,11 +3350,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                         int frame_len;
                         unsigned int des;
 
-                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
-                                des = le32_to_cpu(p->des0);
-                        else
-                                des = le32_to_cpu(p->des2);
-
+                        stmmac_get_desc_addr(priv, p, &des);
                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
 
                         /* If frame length is greater than skb buffer size
@@ -3705,6 +3644,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
         /* To handle GMAC own interrupts */
         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+                int mtl_status;
 
                 if (unlikely(status)) {
                         /* For LPI we need to save the tx status */
@@ -3714,20 +3654,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
                                 priv->tx_path_in_lpi_mode = false;
                 }
 
-                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
-                        for (queue = 0; queue < queues_count; queue++) {
-                                struct stmmac_rx_queue *rx_q =
-                                &priv->rx_queue[queue];
+                for (queue = 0; queue < queues_count; queue++) {
+                        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
-                                status |= stmmac_host_mtl_irq_status(priv,
-                                                priv->hw, queue);
+                        mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
+                                                                queue);
+                        if (mtl_status != -EINVAL)
+                                status |= mtl_status;
 
-                                if (status & CORE_IRQ_MTL_RX_OVERFLOW)
-                                        stmmac_set_rx_tail_ptr(priv,
-                                                        priv->ioaddr,
-                                                        rx_q->rx_tail_addr,
-                                                        queue);
-                        }
+                        if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+                                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+                                                       rx_q->rx_tail_addr,
+                                                       queue);
                 }
 
                 /* PCS link status */
...