Commit eb716a64 authored by David S. Miller

Merge branch 'stmmac-next'

Jose Abreu says:

====================
net: stmmac: Improvements for -next

[ This is just a rebase of v2 into latest -next in order to avoid a merge
conflict ]

Couple of improvements for -next tree. More info in commit logs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6c9081a3 ccfc639a
......@@ -354,6 +354,8 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
unsigned int rssen;
unsigned int vlhash;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
......@@ -381,6 +383,10 @@ struct dma_features {
#define JUMBO_LEN 9000
/* Receive Side Scaling */
#define STMMAC_RSS_HASH_KEY_SIZE 40
#define STMMAC_RSS_MAX_TABLE_SIZE 256
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
......
......@@ -44,6 +44,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
#define XGMAC_FILTER_VTFE BIT(16)
#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
......@@ -51,6 +52,14 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
#define XGMAC_VLAN_TAG 0x00000050
#define XGMAC_VLAN_EDVLP BIT(26)
#define XGMAC_VLAN_VTHM BIT(25)
#define XGMAC_VLAN_DOVLTC BIT(20)
#define XGMAC_VLAN_ESVL BIT(18)
#define XGMAC_VLAN_ETV BIT(16)
#define XGMAC_VLAN_VID GENMASK(15, 0)
#define XGMAC_VLAN_HASH_TABLE 0x00000058
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
......@@ -84,10 +93,13 @@
#define XGMAC_HWFEAT_AVSEL BIT(11)
#define XGMAC_HWFEAT_RAVSEL BIT(10)
#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
......@@ -98,6 +110,15 @@
#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
#define XGMAC_HW_FEATURE3 0x00000128
#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
#define XGMAC_HWFEAT_FRPSEL BIT(3)
#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
#define XGMAC_MAC_FSM_CONTROL 0x00000158
#define XGMAC_PRTYEN BIT(1)
#define XGMAC_TMOUTEN BIT(0)
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
......@@ -108,6 +129,17 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
#define XGMAC_RSS_CTRL 0x00000c80
#define XGMAC_UDP4TE BIT(3)
#define XGMAC_TCP4TE BIT(2)
#define XGMAC_IP2TE BIT(1)
#define XGMAC_RSSE BIT(0)
#define XGMAC_RSS_ADDR 0x00000c88
#define XGMAC_RSSIA_SHIFT 8
#define XGMAC_ADDRT BIT(2)
#define XGMAC_CT BIT(1)
#define XGMAC_OB BIT(0)
#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
......@@ -116,6 +148,7 @@
/* MTL Registers */
#define XGMAC_MTL_OPMODE 0x00001000
#define XGMAC_FRPE BIT(15)
#define XGMAC_ETSALG GENMASK(6, 5)
#define XGMAC_WRR (0x0 << 5)
#define XGMAC_WFQ (0x1 << 5)
......@@ -124,8 +157,32 @@
#define XGMAC_MTL_INT_STATUS 0x00001020
#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
#define XGMAC_QDDMACH BIT(7)
#define XGMAC_TC_PRTY_MAP0 0x00001040
#define XGMAC_TC_PRTY_MAP1 0x00001044
#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
#define XGMAC_RXPI BIT(31)
#define XGMAC_NPE GENMASK(23, 16)
#define XGMAC_NVE GENMASK(7, 0)
#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0
#define XGMAC_STARTBUSY BIT(31)
#define XGMAC_WRRDN BIT(16)
#define XGMAC_ADDR GENMASK(9, 0)
#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4
#define XGMAC_MTL_ECC_CONTROL 0x000010c0
#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4
#define XGMAC_MEUIS BIT(1)
#define XGMAC_MECIS BIT(0)
#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8
#define XGMAC_RPCEIE BIT(12)
#define XGMAC_ECEIE BIT(8)
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
......@@ -190,6 +247,16 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
#define XGMAC_MCSIS BIT(31)
#define XGMAC_MSUIS BIT(29)
#define XGMAC_MSCIS BIT(28)
#define XGMAC_DEUIS BIT(1)
#define XGMAC_DECIS BIT(0)
#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
......@@ -256,6 +323,13 @@
#define XGMAC_RDES3_IOC BIT(30)
#define XGMAC_RDES3_LD BIT(28)
#define XGMAC_RDES3_CDA BIT(27)
#define XGMAC_RDES3_RSV BIT(26)
#define XGMAC_RDES3_L34T GENMASK(23, 20)
#define XGMAC_RDES3_L34T_SHIFT 20
#define XGMAC_L34T_IP4TCP 0x1
#define XGMAC_L34T_IP4UDP 0x2
#define XGMAC_L34T_IP6TCP 0x9
#define XGMAC_L34T_IP6UDP 0xA
#define XGMAC_RDES3_ES BIT(15)
#define XGMAC_RDES3_PL GENMASK(13, 0)
#define XGMAC_RDES3_TSD BIT(6)
......
......@@ -6,6 +6,7 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "dwxgmac2.h"
......@@ -118,6 +119,23 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
writel(value, ioaddr + reg);
}
/* Program the TX priority map for one traffic class/queue. */
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg, val;

	/* Queues 0-3 live in TC_PRTY_MAP0, queues 4-7 in TC_PRTY_MAP1 */
	if (queue < 4) {
		reg = XGMAC_TC_PRTY_MAP0;
	} else {
		reg = XGMAC_TC_PRTY_MAP1;
		queue -= 4;
	}

	val = readl(ioaddr + reg);
	val &= ~XGMAC_PSTC(queue);
	val |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
	writel(val, ioaddr + reg);
}
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
u32 rx_alg)
{
......@@ -144,7 +162,9 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
u32 tx_alg)
{
void __iomem *ioaddr = hw->pcsr;
bool ets = true;
u32 value;
int i;
value = readl(ioaddr + XGMAC_MTL_OPMODE);
value &= ~XGMAC_ETSALG;
......@@ -160,10 +180,28 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
value |= XGMAC_DWRR;
break;
default:
ets = false;
break;
}
writel(value, ioaddr + XGMAC_MTL_OPMODE);
/* Set ETS if desired */
for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
value &= ~XGMAC_TSA;
if (ets)
value |= XGMAC_ETS;
writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
}
}
/* Set the DWRR quantum/weight for one TX traffic class. */
static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *base = hw->pcsr;

	/* Direct write; no read-modify-write needed for this register */
	writel(weight, base + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
......@@ -402,17 +440,574 @@ static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
/* Write one 32-bit word into the RSS key (@is_key) or indirection table
 * through the indirect RSS_ADDR/RSS_DATA pair. Returns 0 or a poll timeout
 * error.
 */
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl;

	/* Data first, then address/op with OB set to start the write */
	writel(val, ioaddr + XGMAC_RSS_DATA);

	ctrl = (idx << XGMAC_RSSIA_SHIFT) | XGMAC_OB;
	if (is_key)
		ctrl |= XGMAC_ADDRT;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	/* Wait until the engine clears the Operation Busy bit */
	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}
/* Enable or disable Receive Side Scaling: program the hash key and the
 * indirection table, route RX queues to the RSS-selected DMA channel and
 * finally set the enable bits. Returns 0 or the first indirect-write error.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 *key = (u32 *)cfg->key;
	int i, ret;
	u32 value;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg->enable) {
		/* RSS off: just clear the enable bit and leave */
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* Program the hash key one 32-bit word at a time */
	for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
		if (ret)
			return ret;
	}

	/* Program the indirection table */
	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	/* QDDMACH: let the RSS result pick the DMA channel per queue */
	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	/* Enable hashing for UDP/IPv4, TCP/IPv4 and IP-only traffic */
	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);

	return 0;
}
/* Program the VLAN hash filter. A non-zero @hash enables hash-based VLAN
 * filtering (optionally for double/S-VLAN tags); zero disables it and
 * clears the related VLAN_TAG controls.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		/* Re-read VLAN_TAG before OR-ing in the enable bits.
		 * Re-using the PACKET_FILTER value read above would write
		 * unrelated filter bits into the VLAN_TAG register.
		 */
		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
/* One descriptor per interrupt-status bit: @valid marks implemented bits,
 * @desc is the short mnemonic, @detailed_desc the human-readable text.
 */
struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};
#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
/* Decode a safety interrupt status word: log one message per set bit and
 * bump the matching counter inside @stats.
 *
 * @field_offset selects the per-module counter array inside
 * struct stmmac_safety_stats (see STAT_OFF()).
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	/* Counters for this module start field_offset bytes into stats */
	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
			   "correctable" : "uncorrectable", module_name,
			   desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}
/* Descriptors for XGMAC_MAC_DPP_FSM_INT_STATUS: entry N documents bit N. */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
/* Read, acknowledge and log the MAC DPP/FSM safety errors. */
static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	status = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	/* Write the value back to acknowledge the pending bits */
	writel(status, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}
/* Descriptors for XGMAC_MTL_ECC_INT_STATUS: entry N documents bit N. */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
/* Read, acknowledge and log the MTL ECC safety errors. */
static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	/* Write the value back to acknowledge the pending bits */
	writel(status, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}
/* Descriptors for XGMAC_DMA_ECC_INT_STATUS: entry N documents bit N. */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
/* Read, acknowledge and log the DMA ECC safety errors. */
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	status = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	/* Write the value back to acknowledge the pending bits */
	writel(status, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}
/* Enable the safety features according to @asp, the Automotive Safety
 * Package level reported by HW_FEATURE3. asp == 0 means the feature is
 * absent; asp > 1 additionally enables FSM parity/timeout protection.
 */
static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}
/* Safety interrupt dispatcher: read the MTL and DMA safety status words,
 * call the MAC/MTL/DMA handlers for whichever sources are pending, and
 * return non-zero when at least one non-correctable error was handled.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC-level (MCSIS) errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* NOTE(review): corr is true whenever a correctable bit is set,
	 * even if an uncorrectable bit is pending at the same time — this
	 * mirrors the dwmac5 implementation, but worth confirming.
	 */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
/* Module order must match dwxgmac3_safety_feat_dump(): index/32 selects
 * 0 = MAC, 1 = MTL, 2 = DMA; index%32 selects the bit within the module.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};
/* Map a flat stats @index to its counter value and mnemonic; @count and
 * @desc are optional outputs. Returns -EINVAL for out-of-range or
 * unimplemented bits.
 */
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	unsigned long *ptr = (unsigned long *)stats;
	int module = index / 32;
	int offset = index % 32;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;

	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;

	return 0;
}
/* Clear the Flexible RX Parser enable bit. Always succeeds. */
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 opmode;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_FRPE;
	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}
/* Set the Flexible RX Parser enable bit. */
static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	writel(opmode | XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);
}
/* Write one parser entry (several 32-bit words) into the RX parser
 * instruction memory via the indirect access registers. @pos is the table
 * slot; each slot holds sizeof(entry->val) bytes. Returns 0 or a poll
 * timeout error. The data/addr/op/start register sequence is
 * order-critical.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		/* Word index within the whole table, not within the entry */
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
/* Return the not-yet-programmed entry with the lowest priority that is
 * still >= @curr_prio, skipping unused entries, fragments and the final
 * "all pass" entry. NULL when nothing is left to program.
 */
static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
			    unsigned int count, u32 curr_prio)
{
	struct stmmac_tc_entry *best = NULL;
	u32 best_prio = ~0x0;
	int i;

	for (i = count - 1; i >= 0; i--) {
		struct stmmac_tc_entry *candidate = &entries[i];

		/* Skip unused, already-programmed, last and fragment
		 * entries; they are handled elsewhere.
		 */
		if (!candidate->in_use || candidate->in_hw ||
		    candidate->is_last || candidate->is_frag)
			continue;

		/* Priorities below curr_prio were already walked */
		if (candidate->prio < curr_prio)
			continue;

		if (candidate->prio < best_prio) {
			best_prio = candidate->prio;
			best = candidate;
		}
	}

	return best;
}
/* Program the whole Flexible RX Parser table. RX and the parser are kept
 * disabled while the table is rewritten; entries are written in priority
 * order with fragments placed right after their parent, and the "all pass"
 * (is_last) entries appended at the end. RX is re-enabled on every exit
 * path via the goto cleanup.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			/* ok_index jumps past the fragment slot */
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	/* Nothing programmed: leave the parser disabled (ret is 0 here) */
	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
.tx_queue_prio = NULL,
.tx_queue_prio = dwxgmac2_tx_queue_prio,
.rx_queue_routing = NULL,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = NULL,
.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
.config_cbs = dwxgmac2_config_cbs,
.dump_regs = NULL,
......@@ -431,7 +1026,13 @@ const struct stmmac_ops dwxgmac210_ops = {
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
.safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
.rss_configure = dwxgmac2_rss_configure,
.update_vlan_hash = dwxgmac2_update_vlan_hash,
.rxp_config = dwxgmac3_rxp_config,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
......
......@@ -254,6 +254,34 @@ static void dwxgmac2_clear(struct dma_desc *p)
p->des3 = 0;
}
/* Extract the RSS hash from an RX descriptor. Returns 0 with *hash and
 * *type filled when RDES3.RSV says the hash in RDES1 is valid, -EINVAL
 * otherwise.
 */
static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
				enum pkt_hash_types *type)
{
	unsigned int rdes3 = le32_to_cpu(p->des3);

	if (!(rdes3 & XGMAC_RDES3_RSV))
		return -EINVAL;

	/* L3/L4 packet-type field decides the hash type */
	switch ((rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT) {
	case XGMAC_L34T_IP4TCP:
	case XGMAC_L34T_IP4UDP:
	case XGMAC_L34T_IP6TCP:
	case XGMAC_L34T_IP6UDP:
		*type = PKT_HASH_TYPE_L4;
		break;
	default:
		*type = PKT_HASH_TYPE_L3;
		break;
	}

	*hash = le32_to_cpu(p->des1);
	return 0;
}
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
......@@ -277,4 +305,5 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
.get_rx_hash = dwxgmac2_get_rx_hash,
};
......@@ -356,12 +356,15 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
......@@ -396,6 +399,13 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
dma_cap->number_rx_queues =
((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
/* MAC HW feature 3 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
......
......@@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry {
.min_id = DWXGMAC_CORE_2_10,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
.mmc_off = 0,
.mmc_off = MMC_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
......@@ -209,7 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
.mmc = NULL,
.mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
......
......@@ -86,6 +86,9 @@ struct stmmac_desc_ops {
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
void (*clear)(struct dma_desc *p);
/* RSS */
int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
enum pkt_hash_types *type);
};
#define stmmac_init_rx_desc(__priv, __args...) \
......@@ -136,6 +139,8 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, clear, __args)
#define stmmac_get_rx_hash(__priv, __args...) \
stmmac_do_callback(__priv, desc, get_rx_hash, __args)
struct stmmac_dma_cfg;
struct dma_features;
......@@ -249,6 +254,7 @@ struct rgmii_adv;
struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
/* Helpers to program the MAC core */
struct stmmac_ops {
......@@ -327,6 +333,12 @@ struct stmmac_ops {
u32 sub_second_inc, u32 systime_flags);
/* Loopback for selftests */
void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
/* RSS */
int (*rss_configure)(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
bool is_double);
};
#define stmmac_core_init(__priv, __args...) \
......@@ -397,6 +409,10 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
#define stmmac_set_mac_loopback(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
#define stmmac_rss_configure(__priv, __args...) \
stmmac_do_callback(__priv, mac, rss_configure, __args)
#define stmmac_update_vlan_hash(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
......@@ -503,6 +519,7 @@ extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
......
......@@ -24,6 +24,7 @@
#define MMC_GMAC4_OFFSET 0x700
#define MMC_GMAC3_X_OFFSET 0x100
#define MMC_XGMAC_OFFSET 0x800
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
......@@ -116,6 +117,14 @@ struct stmmac_counters {
unsigned int mmc_rx_tcp_err_octets;
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
/* FPE */
unsigned int mmc_tx_fpe_fragment_cntr;
unsigned int mmc_tx_hold_req_cntr;
unsigned int mmc_rx_packet_assembly_err_cntr;
unsigned int mmc_rx_packet_smd_err_cntr;
unsigned int mmc_rx_packet_assembly_ok_cntr;
unsigned int mmc_rx_fpe_fragment_cntr;
};
#endif /* __MMC_H__ */
......@@ -119,6 +119,64 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
/* XGMAC MMC Registers */
#define MMC_XGMAC_TX_OCTET_GB 0x14
#define MMC_XGMAC_TX_PKT_GB 0x1c
#define MMC_XGMAC_TX_BROAD_PKT_G 0x24
#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c
#define MMC_XGMAC_TX_64OCT_GB 0x34
#define MMC_XGMAC_TX_65OCT_GB 0x3c
#define MMC_XGMAC_TX_128OCT_GB 0x44
#define MMC_XGMAC_TX_256OCT_GB 0x4c
#define MMC_XGMAC_TX_512OCT_GB 0x54
#define MMC_XGMAC_TX_1024OCT_GB 0x5c
#define MMC_XGMAC_TX_UNI_PKT_GB 0x64
#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c
#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74
#define MMC_XGMAC_TX_UNDER 0x7c
#define MMC_XGMAC_TX_OCTET_G 0x84
#define MMC_XGMAC_TX_PKT_G 0x8c
#define MMC_XGMAC_TX_PAUSE 0x94
#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c
#define MMC_XGMAC_TX_LPI_USEC 0xa4
#define MMC_XGMAC_TX_LPI_TRAN 0xa8
#define MMC_XGMAC_RX_PKT_GB 0x100
#define MMC_XGMAC_RX_OCTET_GB 0x108
#define MMC_XGMAC_RX_OCTET_G 0x110
#define MMC_XGMAC_RX_BROAD_PKT_G 0x118
#define MMC_XGMAC_RX_MULTI_PKT_G 0x120
#define MMC_XGMAC_RX_CRC_ERR 0x128
#define MMC_XGMAC_RX_RUNT_ERR 0x130
#define MMC_XGMAC_RX_JABBER_ERR 0x134
#define MMC_XGMAC_RX_UNDER 0x138
#define MMC_XGMAC_RX_OVER 0x13c
#define MMC_XGMAC_RX_64OCT_GB 0x140
#define MMC_XGMAC_RX_65OCT_GB 0x148
#define MMC_XGMAC_RX_128OCT_GB 0x150
#define MMC_XGMAC_RX_256OCT_GB 0x158
#define MMC_XGMAC_RX_512OCT_GB 0x160
#define MMC_XGMAC_RX_1024OCT_GB 0x168
#define MMC_XGMAC_RX_UNI_PKT_G 0x170
#define MMC_XGMAC_RX_LENGTH_ERR 0x178
#define MMC_XGMAC_RX_RANGE 0x180
#define MMC_XGMAC_RX_PAUSE 0x188
#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190
#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198
#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0
#define MMC_XGMAC_RX_LPI_USEC 0x1a4
#define MMC_XGMAC_RX_LPI_TRAN 0x1a8
#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
#define MMC_XGMAC_RX_FPE_FRAG 0x234
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
......@@ -263,3 +321,137 @@ const struct stmmac_mmc_ops dwmac_mmc_ops = {
.intr_all_mask = dwmac_mmc_intr_all_mask,
.read = dwmac_mmc_read,
};
/* Program the XGMAC MMC control register, OR-ing in the requested mode
 * bits (only the low 6 bits of @mode are defined) on top of the current
 * register contents.
 */
static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
	u32 ctrl;

	ctrl = readl(mmcaddr + MMC_CNTRL);
	ctrl |= mode & 0x3F;
	writel(ctrl, mmcaddr + MMC_CNTRL);
}
/* Mask (disable) all MMC counter interrupts, RX and TX, so that the
 * counters never raise the MMC interrupt line.
 */
static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
}
/* Read one XGMAC 64-bit MMC counter (low word at @reg, high word at
 * @reg + 4) and accumulate it into the 32-bit counter @dest.
 *
 * The MMC core is programmed to reset each counter on read, so each
 * call folds a fresh snapshot into the running total.  If either the
 * snapshot or the accumulated sum no longer fits in 32 bits, saturate
 * @dest to ~0 instead of letting the u32 wrap silently (the original
 * code only saturated on snapshot overflow, so the accumulator could
 * still wrap).
 */
static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
{
	u64 tmp = 0;

	tmp += readl(addr + reg);
	tmp += ((u64)readl(addr + reg + 0x4)) << 32;
	tmp += *dest;

	if (tmp > GENMASK(31, 0))
		*dest = ~0x0;
	else
		*dest = tmp;
}
/* This reads the MAC core counters (if actually supported).
 * By default the MMC core is programmed to reset each
 * counter after a read. So all the fields of the mmc struct
 * have to be incremented.
 */
/* Read every XGMAC MMC hardware counter and accumulate it into @mmc.
 * 64-bit counters go through dwxgmac_read_mmc_reg(); the 32-bit-only
 * error/FPE counters are read and accumulated directly.
 *
 * Fix: the original code read MMC_XGMAC_RX_CRC_ERR twice in a row into
 * mmc_rx_crc_error (copy-paste duplicate); the second read is dropped.
 */
static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
{
	/* MMC TX counter registers */
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
			     &mmc->mmc_tx_octetcount_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
			     &mmc->mmc_tx_framecount_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
			     &mmc->mmc_tx_broadcastframe_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
			     &mmc->mmc_tx_multicastframe_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
			     &mmc->mmc_tx_64_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
			     &mmc->mmc_tx_65_to_127_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
			     &mmc->mmc_tx_128_to_255_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
			     &mmc->mmc_tx_256_to_511_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
			     &mmc->mmc_tx_512_to_1023_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
			     &mmc->mmc_tx_1024_to_max_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
			     &mmc->mmc_tx_unicast_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
			     &mmc->mmc_tx_multicast_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
			     &mmc->mmc_tx_broadcast_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
			     &mmc->mmc_tx_underflow_error);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
			     &mmc->mmc_tx_octetcount_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
			     &mmc->mmc_tx_framecount_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
			     &mmc->mmc_tx_pause_frame);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
			     &mmc->mmc_tx_vlan_frame_g);

	/* MMC RX counter registers */
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
			     &mmc->mmc_rx_framecount_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
			     &mmc->mmc_rx_octetcount_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
			     &mmc->mmc_rx_octetcount_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
			     &mmc->mmc_rx_broadcastframe_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
			     &mmc->mmc_rx_multicastframe_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
			     &mmc->mmc_rx_crc_error);
	/* 32-bit-only error counters: read and accumulate directly */
	mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
	mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
	mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
	mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
			     &mmc->mmc_rx_64_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
			     &mmc->mmc_rx_65_to_127_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
			     &mmc->mmc_rx_128_to_255_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
			     &mmc->mmc_rx_256_to_511_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
			     &mmc->mmc_rx_512_to_1023_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
			     &mmc->mmc_rx_1024_to_max_octets_gb);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
			     &mmc->mmc_rx_unicast_g);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
			     &mmc->mmc_rx_length_error);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
			     &mmc->mmc_rx_autofrangetype);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
			     &mmc->mmc_rx_pause_frames);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
			     &mmc->mmc_rx_fifo_overflow);
	dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
			     &mmc->mmc_rx_vlan_frames_gb);
	mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);

	/* FPE (Frame Preemption) counters, 32-bit only */
	mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
	mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
	mmc->mmc_rx_packet_assembly_err_cntr +=
		readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
	mmc->mmc_rx_packet_smd_err_cntr +=
		readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
	mmc->mmc_rx_packet_assembly_ok_cntr +=
		readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
	mmc->mmc_rx_fpe_fragment_cntr +=
		readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
}
/* MMC counter callbacks for the XGMAC core family */
const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
	.ctrl = dwxgmac_mmc_ctrl,
	.intr_all_mask = dwxgmac_mmc_intr_all_mask,
	.read = dwxgmac_mmc_read,
};
......@@ -13,6 +13,7 @@
#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
#include <linux/pci.h>
......@@ -113,6 +114,12 @@ struct stmmac_pps_cfg {
struct timespec64 period;
};
/* Runtime Receive Side Scaling (RSS) state kept in stmmac_priv */
struct stmmac_rss {
	int enable;				/* non-zero when RSS is enabled in HW */
	u8 key[STMMAC_RSS_HASH_KEY_SIZE];	/* hash key (ETH_RSS_HASH_TOP) */
	u32 table[STMMAC_RSS_MAX_TABLE_SIZE];	/* indirection table: entry -> RX queue */
};
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
......@@ -185,6 +192,7 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
......@@ -203,6 +211,9 @@ struct stmmac_priv {
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
/* Receive Side Scaling */
struct stmmac_rss rss;
};
enum stmmac_state {
......
......@@ -243,6 +243,12 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
......@@ -758,6 +764,76 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
/* ethtool -n/-x support: currently only reports the RX ring count
 * (ETHTOOL_GRXRINGS); every other query is unsupported.
 */
static int stmmac_get_rxnfc(struct net_device *dev,
			    struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (rxnfc->cmd != ETHTOOL_GRXRINGS)
		return -EOPNOTSUPP;

	rxnfc->data = priv->plat->rx_queues_to_use;
	return 0;
}
static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return sizeof(priv->rss.key);
}
static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
return ARRAY_SIZE(priv->rss.table);
}
/* ethtool --show-rxfh: copy out the current RSS indirection table,
 * hash key, and hash function (always Toeplitz/ETH_RSS_HASH_TOP).
 * Any of @indir, @key, @hfunc may be NULL when not requested.
 */
static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (indir)
		memcpy(indir, priv->rss.table, sizeof(priv->rss.table));
	if (key)
		memcpy(key, priv->rss.key, sizeof(priv->rss.key));
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}
/* ethtool --set-rxfh: update the RSS indirection table and/or key and
 * push the new configuration to hardware.  Only the Toeplitz hash
 * function is supported.
 */
static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (indir)
		memcpy(priv->rss.table, indir, sizeof(priv->rss.table));
	if (key)
		memcpy(priv->rss.key, key, sizeof(priv->rss.key));

	return stmmac_rss_configure(priv, priv->hw, &priv->rss,
				    priv->plat->rx_queues_to_use);
}
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
......@@ -849,6 +925,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_rxnfc = stmmac_get_rxnfc,
.get_rxfh_key_size = stmmac_get_rxfh_key_size,
.get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
.get_rxfh = stmmac_get_rxfh,
.set_rxfh = stmmac_set_rxfh,
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
......
......@@ -2417,6 +2417,22 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
}
}
/* Enable or disable RSS in hardware.  RSS is active only when the HW
 * capability (dma_cap.rssen) and the platform opt-in (plat->rss_en) are
 * both set AND the netdev currently has NETIF_F_RXHASH enabled.
 */
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	priv->rss.enable = !!(priv->dev->features & NETIF_F_RXHASH);

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
......@@ -2461,6 +2477,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
/* Set RX routing */
if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
/* Receive Side Scaling */
if (rx_queues_count > 1)
stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
......@@ -3385,9 +3405,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
priv->dev->stats.rx_errors++;
buf->page = NULL;
} else {
enum pkt_hash_types hash_type;
struct sk_buff *skb;
int frame_len;
unsigned int des;
int frame_len;
u32 hash;
stmmac_get_desc_addr(priv, p, &des);
frame_len = stmmac_get_rx_frame_len(priv, p, coe);
......@@ -3452,6 +3474,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
skb_set_hash(skb, hash, hash_type);
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rx_napi, skb);
/* Data payload copied into SKB, page ready for recycle */
......@@ -4011,6 +4037,79 @@ static void stmmac_exit_fs(struct net_device *dev)
}
#endif /* CONFIG_DEBUG_FS */
/* Compute a bit-reflected CRC-32 (polynomial 0xedb88320) over the low
 * get_bitmask_order(VLAN_VID_MASK) bits of the little-endian VID.
 * This matches the CRC the MAC uses internally for VLAN hash filtering.
 */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;		/* standard CRC-32 initial value */
	u32 temp = 0;
	int i, bits;

	/* only the VID bits participate, not the full 16-bit TCI */
	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];	/* load next input byte */

		/* LSB-first: mix one input bit into the CRC */
		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}
/* Rebuild the 16-bit VLAN hash filter from all currently active VIDs
 * and program it into the MAC.  @is_double selects S-VLAN (802.1ad)
 * handling in hardware.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 hash = 0;
	u16 vid;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		u32 bitnr = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;

		hash |= (1 << bitnr);
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
}
/* ndo_vlan_rx_add_vid: track @vid as active and reprogram the VLAN
 * hash filter.  On hardware failure the VID is removed from the
 * tracking bitmap again so software and hardware state stay in sync.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret)
		clear_bit(vid, priv->active_vlans);

	return ret;
}
/* ndo_vlan_rx_kill_vid: drop @vid from the active set and reprogram
 * the VLAN hash filter accordingly.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	is_double = be16_to_cpu(proto) == ETH_P_8021AD;

	clear_bit(vid, priv->active_vlans);
	return stmmac_vlan_update(priv, is_double);
}
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
......@@ -4027,6 +4126,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_poll_controller = stmmac_poll_controller,
#endif
.ndo_set_mac_address = stmmac_set_mac_address,
.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
......@@ -4175,8 +4276,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
u32 queue, maxq;
int ret = 0;
u32 queue, rxq, maxq;
int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
......@@ -4281,9 +4382,22 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
/* Initialize RSS */
rxq = priv->plat->rx_queues_to_use;
netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
if (priv->dma_cap.rssen && priv->plat->rss_en)
ndev->features |= NETIF_F_RXHASH;
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
......
......@@ -11,8 +11,10 @@
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include "stmmac.h"
struct stmmachdr {
......@@ -229,7 +231,7 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
goto out;
}
if (tpriv->packet->src) {
if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
goto out;
}
......@@ -700,6 +702,308 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
return ret;
}
static int stmmac_test_rss(struct stmmac_priv *priv)
{
struct stmmac_packet_attrs attr = { };
if (!priv->dma_cap.rssen || !priv->rss.enable)
return -EOPNOTSUPP;
attr.dst = priv->dev->dev_addr;
attr.exp_hash = true;
attr.sport = 0x321;
attr.dport = 0x123;
return __stmmac_test_loopback(priv, &attr);
}
/* packet_type handler used by the VLAN filtering selftests: validate
 * that a received skb is the UDP test packet we transmitted and, if so,
 * mark the test OK and signal the waiting thread.  Always consumes the
 * skb and returns 0.
 */
static int stmmac_test_vlan_validate(struct sk_buff *skb,
				     struct net_device *ndev,
				     struct packet_type *pt,
				     struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct stmmachdr *shdr;
	struct ethhdr *ehdr;
	struct udphdr *uhdr;
	struct iphdr *ihdr;

	/* Get a private, linear copy before parsing headers by offset */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	if (skb_linearize(skb))
		goto out;
	if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
		goto out;

	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
		goto out;

	ihdr = ip_hdr(skb);
	if (tpriv->double_vlan)
		/* skip the inner VLAN tag (4 bytes) to reach the real
		 * IP header; network header points past the outer tag
		 * only — assumes exactly one extra tag, per the test
		 * packet builder
		 */
		ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
	if (ihdr->protocol != IPPROTO_UDP)
		goto out;

	/* UDP header follows the IP header (ihl is in 32-bit words) */
	uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
	if (uhdr->dest != htons(tpriv->packet->dport))
		goto out;

	/* Finally check the selftest magic payload */
	shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
	if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
		goto out;

	tpriv->ok = true;
	complete(&tpriv->comp);

out:
	kfree_skb(skb);
	return 0;
}
/* Selftest for C-VLAN (802.1Q) hash filtering: register one VID, then
 * transmit packets tagged with that VID and three neighbouring VIDs.
 * Only the registered VID (iteration 0) must be received; the others
 * must be dropped by the hardware filter.
 */
static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	/* Snoop incoming IP packets through stmmac_test_vlan_validate() */
	tpriv->pt.type = htons(ETH_P_IP);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 1;	/* single (C-)VLAN tag */
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = !tpriv->ok;	/* 0 if the packet was received */
		if (ret && !i) {
			/* registered VID must pass the filter */
			goto vlan_del;
		} else if (!ret && i) {
			/* unregistered VID leaked through: fail */
			ret = -1;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;	/* re-arm for the next iteration */
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}
/* Selftest for S-VLAN (802.1ad double-tag) hash filtering: same
 * protocol as stmmac_test_vlanfilt(), but packets carry two VLAN tags
 * and the VID is registered for the 802.1ad protocol.
 */
static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
{
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	int ret = 0, i;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	tpriv->double_vlan = true;	/* validator skips the inner tag */
	init_completion(&tpriv->comp);

	/* Double-tagged frames arrive with the inner 802.1Q tag exposed */
	tpriv->pt.type = htons(ETH_P_8021Q);
	tpriv->pt.func = stmmac_test_vlan_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;

	/*
	 * As we use HASH filtering, false positives may appear. This is a
	 * specially chosen ID so that adjacent IDs (+4) have different
	 * HASH values.
	 */
	tpriv->vlan_id = 0x123;
	dev_add_pack(&tpriv->pt);

	ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
	if (ret)
		goto cleanup;

	for (i = 0; i < 4; i++) {
		attr.vlan = 2;	/* double VLAN tag */
		attr.vlan_id_out = tpriv->vlan_id + i;
		attr.dst = priv->dev->dev_addr;
		attr.sport = 9;
		attr.dport = 9;

		skb = stmmac_test_get_udp_skb(priv, &attr);
		if (!skb) {
			ret = -ENOMEM;
			goto vlan_del;
		}

		skb_set_queue_mapping(skb, 0);
		ret = dev_queue_xmit(skb);
		if (ret)
			goto vlan_del;

		wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
		ret = !tpriv->ok;	/* 0 if the packet was received */
		if (ret && !i) {
			/* registered VID must pass the filter */
			goto vlan_del;
		} else if (!ret && i) {
			/* unregistered VID leaked through: fail */
			ret = -1;
			goto vlan_del;
		} else {
			ret = 0;
		}

		tpriv->ok = false;	/* re-arm for the next iteration */
	}

vlan_del:
	vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
cleanup:
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}
#ifdef CONFIG_NET_CLS_ACT
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
struct tc_cls_u32_offload cls_u32 = { };
struct stmmac_packet_attrs attr = { };
struct tc_action **actions, *act;
struct tc_u32_sel *sel;
struct tcf_exts *exts;
int ret, i, nk = 1;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (!priv->dma_cap.frpsel)
return -EOPNOTSUPP;
sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
if (!sel)
return -ENOMEM;
exts = kzalloc(sizeof(*exts), GFP_KERNEL);
if (!exts) {
ret = -ENOMEM;
goto cleanup_sel;
}
actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
if (!actions) {
ret = -ENOMEM;
goto cleanup_exts;
}
act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
if (!act) {
ret = -ENOMEM;
goto cleanup_actions;
}
cls_u32.command = TC_CLSU32_NEW_KNODE;
cls_u32.common.chain_index = 0;
cls_u32.common.protocol = htons(ETH_P_ALL);
cls_u32.knode.exts = exts;
cls_u32.knode.sel = sel;
cls_u32.knode.handle = 0x123;
exts->nr_actions = nk;
exts->actions = actions;
for (i = 0; i < nk; i++) {
struct tcf_gact *gact = to_gact(&act[i]);
actions[i] = &act[i];
gact->tcf_action = TC_ACT_SHOT;
}
sel->nkeys = nk;
sel->offshift = 0;
sel->keys[0].off = 6;
sel->keys[0].val = htonl(0xdeadbeef);
sel->keys[0].mask = ~0x0;
ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
if (ret)
goto cleanup_act;
attr.dst = priv->dev->dev_addr;
attr.src = addr;
ret = __stmmac_test_loopback(priv, &attr);
ret = !ret; /* Shall NOT receive packet */
cls_u32.command = TC_CLSU32_DELETE_KNODE;
stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
cleanup_act:
kfree(act);
cleanup_actions:
kfree(actions);
cleanup_exts:
kfree(exts);
cleanup_sel:
kfree(sel);
return ret;
}
#else
/* Flexible RX Parser test needs cls_u32/tc actions (CONFIG_NET_CLS_ACT) */
static int stmmac_test_rxp(struct stmmac_priv *priv)
{
	return -EOPNOTSUPP;
}
#endif
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
......@@ -745,6 +1049,22 @@ static const struct stmmac_test {
.name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
}, {
.name = "RSS ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rss,
}, {
.name = "VLAN Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_vlanfilt,
}, {
.name = "Double VLAN Filtering",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_dvlanfilt,
}, {
.name = "Flexible RX Parser ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_rxp,
},
};
......
......@@ -173,6 +173,7 @@ struct plat_stmmacenet_data {
int has_gmac4;
bool has_sun8i;
bool tso_en;
int rss_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
int has_xgmac;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment