Commit d1e8496e authored by David S. Miller's avatar David S. Miller

Merge branch 'stmmac-next'

Jose Abreu says:

====================
net: stmmac: Improvements for -next

Couple of improvements for -next tree. More info in commit logs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 44c40910 427849e8
......@@ -360,6 +360,8 @@ struct dma_features {
unsigned int sphen;
unsigned int vlins;
unsigned int dvlan;
unsigned int l3l4fnum;
unsigned int arpoffsel;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
......
......@@ -44,9 +44,11 @@
#define XGMAC_CONFIG_CST BIT(2)
#define XGMAC_CONFIG_ACS BIT(1)
#define XGMAC_CONFIG_RE BIT(0)
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \
(XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT))
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
#define XGMAC_FILTER_IPFE BIT(20)
#define XGMAC_FILTER_VTFE BIT(16)
#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
......@@ -119,6 +121,7 @@
#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27)
#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
#define XGMAC_HWFEAT_SPHEN BIT(17)
......@@ -150,6 +153,34 @@
#define XGMAC_DCS GENMASK(19, 16)
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_L3L4_ADDR_CTRL 0x00000c00
#define XGMAC_IDDR GENMASK(15, 8)
#define XGMAC_IDDR_SHIFT 8
#define XGMAC_IDDR_FNUM 4
#define XGMAC_TT BIT(1)
#define XGMAC_XB BIT(0)
#define XGMAC_L3L4_DATA 0x00000c04
#define XGMAC_L3L4_CTRL 0x0
#define XGMAC_L4DPIM0 BIT(21)
#define XGMAC_L4DPM0 BIT(20)
#define XGMAC_L4SPIM0 BIT(19)
#define XGMAC_L4SPM0 BIT(18)
#define XGMAC_L4PEN0 BIT(16)
#define XGMAC_L3HDBM0 GENMASK(15, 11)
#define XGMAC_L3HSBM0 GENMASK(10, 6)
#define XGMAC_L3DAIM0 BIT(5)
#define XGMAC_L3DAM0 BIT(4)
#define XGMAC_L3SAIM0 BIT(3)
#define XGMAC_L3SAM0 BIT(2)
#define XGMAC_L3PEN0 BIT(0)
#define XGMAC_L4_ADDR 0x1
#define XGMAC_L4DP0 GENMASK(31, 16)
#define XGMAC_L4DP0_SHIFT 16
#define XGMAC_L4SP0 GENMASK(15, 0)
#define XGMAC_L3_ADDR0 0x4
#define XGMAC_L3_ADDR1 0x5
#define XGMAC_L3_ADDR2 0x6
#define XMGAC_L3_ADDR3 0x7
#define XGMAC_ARP_ADDR 0x00000c10
#define XGMAC_RSS_CTRL 0x00000c80
#define XGMAC_UDP4TE BIT(3)
......
......@@ -15,7 +15,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
void __iomem *ioaddr = hw->pcsr;
int mtu = dev->mtu;
u32 tx, rx;
tx = readl(ioaddr + XGMAC_TX_CONFIG);
......@@ -24,16 +23,6 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
tx |= XGMAC_CORE_INIT_TX;
rx |= XGMAC_CORE_INIT_RX;
if (mtu >= 9000) {
rx |= XGMAC_CONFIG_GPSLCE;
rx |= XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT;
rx |= XGMAC_CONFIG_WD;
} else if (mtu > 2000) {
rx |= XGMAC_CONFIG_JE;
} else if (mtu > 1500) {
rx |= XGMAC_CONFIG_S2KP;
}
if (hw->ps) {
tx |= XGMAC_CONFIG_TE;
tx &= ~hw->link.speed_mask;
......@@ -1163,6 +1152,197 @@ static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
writel(value, ioaddr + XGMAC_VLAN_INCL);
}
/* Poll the L3/L4 indirect-access engine until its busy (XB) bit clears.
 * Returns 0 when the engine is idle, -EBUSY on timeout.
 */
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	/* 100us poll period, 10ms total budget */
	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, ctrl,
			       !(ctrl & XGMAC_XB), 100, 10000))
		return -EBUSY;

	return 0;
}
/* Read one 32-bit word of filter @filter_no at register offset @reg through
 * the indirect L3/L4 access engine. Result is stored in *@data.
 */
static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 op;
	int err;

	/* Engine must be idle before issuing a new transaction */
	err = dwxgmac2_filter_wait(hw);
	if (err)
		return err;

	/* Select filter + register; TT flags a read, XB starts the access */
	op = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	op |= XGMAC_TT | XGMAC_XB;
	writel(op, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	err = dwxgmac2_filter_wait(hw);
	if (err)
		return err;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}
/* Write @data into filter @filter_no at register offset @reg through the
 * indirect L3/L4 access engine. Data must be staged before the command.
 */
static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 op;
	int err;

	err = dwxgmac2_filter_wait(hw);
	if (err)
		return err;

	/* Stage the payload first; the command below latches it */
	writel(data, ioaddr + XGMAC_L3L4_DATA);

	/* No TT bit -> write transaction; XB starts the access */
	op = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	op |= XGMAC_XB;
	writel(op, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}
/* Program L3 address filter @filter_no.
 * @en: enable the match; when false the filter control word is cleared after
 *      the address registers are written
 * @ipv6: match an IPv6 address instead of IPv4
 * @sa: match on source address (otherwise destination)
 * @inv: inverse match
 * @match: 32-bit address word to compare
 *
 * NOTE(review): only one 32-bit word (L3_ADDR0 or L3_ADDR1) is written even
 * in the ipv6 case -- confirm against the XGMAC databook whether the caller
 * is expected to program the remaining words.
 */
static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP filtering in the packet filter register */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	/* Read-modify-write the per-filter control word */
	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);

		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		/* IPv4 mode.
		 * NOTE(review): unlike the IPv6 branch, stale SAM/DAM/IM bits
		 * are not cleared before OR-ing new ones -- confirm intended.
		 */
		value &= ~XGMAC_L3PEN0;

		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* SA match uses L3_ADDR0, DA match uses L3_ADDR1 */
	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	/* Disabling: zero the control word so the filter stops matching */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
/* Program L4 port filter @filter_no.
 * @en: enable the match; when false the control word is cleared afterwards
 * @udp: match UDP (otherwise TCP)
 * @sa: match on source port (otherwise destination port)
 * @inv: inverse match
 * @match: 16-bit port value (placed in the low or high half of L4_ADDR)
 */
static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP filtering in the packet filter register */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* L4PEN0 selects UDP; cleared means TCP */
	if (udp) {
		value |= XGMAC_L4PEN0;
	} else {
		value &= ~XGMAC_L4PEN0;
	}

	/* Clear stale source/destination port match + inverse bits */
	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);

	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* Source port lives in the low 16 bits, destination in the high */
	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	/* Disabling: zero the control word so the filter stops matching */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
/* Enable/disable hardware ARP offload and set the IPv4 address the MAC
 * answers ARP requests for.
 */
static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 cfg;

	/* Address register is programmed unconditionally */
	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	cfg = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		cfg |= XGMAC_CONFIG_ARPEN;
	else
		cfg &= ~XGMAC_CONFIG_ARPEN;
	writel(cfg, ioaddr + XGMAC_RX_CONFIG);
}
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
......@@ -1203,6 +1383,9 @@ const struct stmmac_ops dwxgmac210_ops = {
.flex_pps_config = dwxgmac2_flex_pps_config,
.sarc_configure = dwxgmac2_sarc_configure,
.enable_vlan = dwxgmac2_enable_vlan,
.config_l3_filter = dwxgmac2_config_l3_filter,
.config_l4_filter = dwxgmac2_config_l4_filter,
.set_arp_offload = dwxgmac2_set_arp_offload,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
......
......@@ -322,6 +322,10 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
/* ABNORMAL interrupts */
if (unlikely(intr_status & XGMAC_AIS)) {
if (unlikely(intr_status & XGMAC_RBU)) {
x->rx_buf_unav_irq++;
ret |= handle_rx;
}
if (unlikely(intr_status & XGMAC_TPS)) {
x->tx_process_stopped_irq++;
ret |= tx_hard_error;
......@@ -365,7 +369,8 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
......@@ -374,6 +379,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
......
......@@ -363,6 +363,14 @@ struct stmmac_ops {
int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
/* Source Address Insertion / Replacement */
void (*sarc_configure)(void __iomem *ioaddr, int val);
/* Filtering */
int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no,
bool en, bool ipv6, bool sa, bool inv,
u32 match);
int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no,
bool en, bool udp, bool sa, bool inv,
u32 match);
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
};
#define stmmac_core_init(__priv, __args...) \
......@@ -443,6 +451,12 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
#define stmmac_sarc_configure(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
#define stmmac_config_l3_filter(__priv, __args...) \
stmmac_do_callback(__priv, mac, config_l3_filter, __args)
#define stmmac_config_l4_filter(__priv, __args...) \
stmmac_do_callback(__priv, mac, config_l4_filter, __args)
#define stmmac_set_arp_offload(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
......@@ -499,6 +513,7 @@ struct stmmac_mode_ops {
struct stmmac_priv;
struct tc_cls_u32_offload;
struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
......@@ -506,6 +521,8 @@ struct stmmac_tc_ops {
struct tc_cls_u32_offload *cls);
int (*setup_cbs)(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt);
int (*setup_cls)(struct stmmac_priv *priv,
struct flow_cls_offload *cls);
};
#define stmmac_tc_init(__priv, __args...) \
......@@ -514,6 +531,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
#define stmmac_tc_setup_cbs(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cbs, __args)
#define stmmac_tc_setup_cls(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_cls, __args)
struct stmmac_counters;
......
......@@ -128,6 +128,16 @@ struct stmmac_rss {
u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
};
#define STMMAC_FLOW_ACTION_DROP BIT(0)
/* One hardware L3/L4 flow filter entry tracked by the TC flower offload. */
struct stmmac_flow_entry {
	unsigned long cookie;	/* TC rule cookie this entry is bound to */
	unsigned long action;	/* STMMAC_FLOW_ACTION_* bitmask */
	u8 ip_proto;		/* IPPROTO_* taken from the rule's basic key */
	int in_use;		/* non-zero when programmed into hardware */
	int idx;		/* hardware filter index */
	int is_l4;		/* entry programmed an L4 (ports) filter */
};
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
......@@ -216,6 +226,8 @@ struct stmmac_priv {
unsigned int tc_entries_max;
unsigned int tc_off_max;
struct stmmac_tc_entry *tc_entries;
unsigned int flow_entries_max;
struct stmmac_flow_entry *flow_entries;
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
......
......@@ -746,9 +746,16 @@ static int stmmac_set_coalesce(struct net_device *dev,
(ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
return -EOPNOTSUPP;
if (ec->rx_coalesce_usecs == 0)
if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
return -EINVAL;
priv->rx_riwt = rx_riwt;
stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
}
if ((ec->tx_coalesce_usecs == 0) &&
(ec->tx_max_coalesced_frames == 0))
return -EINVAL;
......@@ -757,20 +764,10 @@ static int stmmac_set_coalesce(struct net_device *dev,
(ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
return -EINVAL;
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
return -EINVAL;
else if (!priv->use_riwt)
return -EOPNOTSUPP;
/* Only copy relevant parameters, ignore all others. */
priv->tx_coal_frames = ec->tx_max_coalesced_frames;
priv->tx_coal_timer = ec->tx_coalesce_usecs;
priv->rx_coal_frames = ec->rx_max_coalesced_frames;
priv->rx_riwt = rx_riwt;
stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
return 0;
}
......
......@@ -3511,9 +3511,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
&priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
priv->dev->stats.rx_errors++;
}
if (unlikely(error && (status & rx_not_ls)))
......@@ -3931,13 +3932,18 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
struct stmmac_priv *priv = cb_priv;
int ret = -EOPNOTSUPP;
if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return ret;
stmmac_disable_all_queues(priv);
switch (type) {
case TC_SETUP_CLSU32:
if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
break;
case TC_SETUP_CLSFLOWER:
ret = stmmac_tc_setup_cls(priv, priv, type_data);
break;
default:
break;
}
......@@ -4536,10 +4542,10 @@ int stmmac_dvr_probe(struct device *device,
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
else if (priv->plat->has_xgmac)
if (priv->plat->has_xgmac)
ndev->max_mtu = XGMAC_JUMBO_LEN;
else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
else
ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
......
......@@ -43,9 +43,11 @@ struct stmmac_packet_attrs {
int dont_wait;
int timeout;
int size;
int max_size;
int remove_sa;
u8 id;
int sarc;
u16 queue_mapping;
};
static u8 stmmac_test_next_id;
......@@ -73,12 +75,14 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
else
size += sizeof(struct udphdr);
skb = netdev_alloc_skb(priv->dev, size);
if (attr->max_size && (attr->max_size > size))
size = attr->max_size;
skb = netdev_alloc_skb_ip_align(priv->dev, size);
if (!skb)
return NULL;
prefetchw(skb->data);
skb_reserve(skb, NET_IP_ALIGN);
if (attr->vlan > 1)
ehdr = skb_push(skb, ETH_HLEN + 8);
......@@ -147,6 +151,9 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
uhdr->source = htons(attr->sport);
uhdr->dest = htons(attr->dport);
uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
if (attr->max_size)
uhdr->len = htons(attr->max_size -
(sizeof(*ihdr) + sizeof(*ehdr)));
uhdr->check = 0;
}
......@@ -162,9 +169,13 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
iplen += sizeof(*thdr);
else
iplen += sizeof(*uhdr);
if (attr->max_size)
iplen = attr->max_size - sizeof(*ehdr);
ihdr->tot_len = htons(iplen);
ihdr->frag_off = 0;
ihdr->saddr = 0;
ihdr->saddr = htonl(attr->ip_src);
ihdr->daddr = htonl(attr->ip_dst);
ihdr->tos = 0;
ihdr->id = 0;
......@@ -178,6 +189,8 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
if (attr->size)
skb_put(skb, attr->size);
if (attr->max_size && (attr->max_size > skb->len))
skb_put(skb, attr->max_size - skb->len);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
......@@ -196,6 +209,24 @@ static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
return skb;
}
/* Build an ARP request skb from attr->src/ip_src towards attr->dst/ip_dst,
 * ready to be looped back through the device under test.
 */
static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
					       struct stmmac_packet_attrs *attr)
{
	__be32 sip = htonl(attr->ip_src);
	__be32 dip = htonl(attr->ip_dst);
	struct sk_buff *skb;

	skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, dip, priv->dev, sip,
			 NULL, attr->src, attr->dst);
	if (!skb)
		return NULL;

	skb->pkt_type = PACKET_HOST;
	skb->dev = priv->dev;

	return skb;
}
struct stmmac_test_priv {
struct stmmac_packet_attrs *packet;
struct packet_type pt;
......@@ -306,7 +337,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
goto cleanup;
}
skb_set_queue_mapping(skb, 0);
skb_set_queue_mapping(skb, attr->queue_mapping);
ret = dev_queue_xmit(skb);
if (ret)
goto cleanup;
......@@ -318,7 +349,7 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
attr->timeout = STMMAC_LB_TIMEOUT;
wait_for_completion_timeout(&tpriv->comp, attr->timeout);
ret = !tpriv->ok;
ret = tpriv->ok ? 0 : -ETIMEDOUT;
cleanup:
if (!attr->dont_wait)
......@@ -480,7 +511,7 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
/* Shall NOT receive packet */
ret = __stmmac_test_loopback(priv, &attr);
ret = !ret;
ret = ret ? 0 : -EINVAL;
cleanup:
dev_mc_del(priv->dev, gd_addr);
......@@ -512,7 +543,7 @@ static int stmmac_test_pfilt(struct stmmac_priv *priv)
/* Shall NOT receive packet */
ret = __stmmac_test_loopback(priv, &attr);
ret = !ret;
ret = ret ? 0 : -EINVAL;
cleanup:
dev_uc_del(priv->dev, gd_addr);
......@@ -562,7 +593,7 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
/* Shall NOT receive packet */
ret = __stmmac_test_loopback(priv, &attr);
ret = !ret;
ret = ret ? 0 : -EINVAL;
cleanup:
dev_uc_del(priv->dev, uc_addr);
......@@ -600,7 +631,7 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
/* Shall NOT receive packet */
ret = __stmmac_test_loopback(priv, &attr);
ret = !ret;
ret = ret ? 0 : -EINVAL;
cleanup:
dev_mc_del(priv->dev, mc_addr);
......@@ -699,7 +730,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
}
wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
ret = !tpriv->ok;
ret = tpriv->ok ? 0 : -ETIMEDOUT;
cleanup:
dev_mc_del(priv->dev, paddr);
......@@ -833,11 +864,11 @@ static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
goto vlan_del;
wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
ret = !tpriv->ok;
ret = tpriv->ok ? 0 : -ETIMEDOUT;
if (ret && !i) {
goto vlan_del;
} else if (!ret && i) {
ret = -1;
ret = -EINVAL;
goto vlan_del;
} else {
ret = 0;
......@@ -909,11 +940,11 @@ static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
goto vlan_del;
wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
ret = !tpriv->ok;
ret = tpriv->ok ? 0 : -ETIMEDOUT;
if (ret && !i) {
goto vlan_del;
} else if (!ret && i) {
ret = -1;
ret = -EINVAL;
goto vlan_del;
} else {
ret = 0;
......@@ -998,7 +1029,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
attr.src = addr;
ret = __stmmac_test_loopback(priv, &attr);
ret = !ret; /* Shall NOT receive packet */
ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
cls_u32.command = TC_CLSU32_DELETE_KNODE;
stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
......@@ -1168,6 +1199,392 @@ static int stmmac_test_svlanoff(struct stmmac_priv *priv)
return stmmac_test_vlanoff_common(priv, true);
}
#ifdef CONFIG_NET_CLS_ACT
/* Exercise an L3 (IPv4 address) drop filter: send a matching frame and
 * expect it, install a FLOW_CLS_REPLACE drop rule, send again and expect
 * the frame to be dropped. RSS is temporarily disabled so the frame lands
 * deterministically. Returns 0 on success, negative errno otherwise.
 */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	struct flow_dissector_key_ipv4_addrs key, mask;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;

	/* Park RSS while the test runs; restored in cleanup_rss below */
	if (priv->rss.enable) {
		struct stmmac_rss rss = { .enable = false, };

		stmmac_rss_configure(priv, priv->hw, &rss,
				     priv->plat->rx_queues_to_use);
	}

	/* Hand-build a minimal flower rule: dissector + cls + rule */
	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
	dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&key;
	rule->match.mask = (void *)&mask;

	/* NOTE(review): keys are byte-swapped with htonl() but masks are
	 * not -- callers pass 0 or ~0 so it is symmetric today; confirm
	 * before using partial masks.
	 */
	key.src = htonl(src);
	key.dst = htonl(dst);
	mask.src = src_mask;
	mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.ip_dst = dst;
	attr.ip_src = src;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	/* Install the drop rule in hardware */
	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);

cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	/* Restore the RSS configuration saved in priv */
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
/* Stub when CONFIG_NET_CLS_ACT is disabled: flower offload can't be built. */
static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask)
{
	return -EOPNOTSUPP;
}
#endif
/* L3 filter test matching only the destination address. */
static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
{
	return __stmmac_test_l3filt(priv, 0x10203040, 0, ~0, 0);
}
/* L3 filter test matching only the source address. */
static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
{
	return __stmmac_test_l3filt(priv, 0, 0x10203040, 0, ~0);
}
#ifdef CONFIG_NET_CLS_ACT
/* Exercise an L4 (TCP/UDP port) drop filter, same flow as
 * __stmmac_test_l3filt(): expect the frame, install a drop rule,
 * expect the frame to be dropped. Returns 0 on success.
 */
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	/* BASIC and PORTS keys laid out back-to-back, as the dissector
	 * offsets below expect.
	 */
	struct {
		struct flow_dissector_key_basic bkey;
		struct flow_dissector_key_ports key;
	} __aligned(BITS_PER_LONG / 8) keys;
	struct {
		struct flow_dissector_key_basic bmask;
		struct flow_dissector_key_ports mask;
	} __aligned(BITS_PER_LONG / 8) masks;
	unsigned long dummy_cookie = 0xdeadbeef;
	struct stmmac_packet_attrs attr = { };
	struct flow_dissector *dissector;
	struct flow_cls_offload *cls;
	struct flow_rule *rule;
	int ret;

	if (!tc_can_offload(priv->dev))
		return -EOPNOTSUPP;
	if (!priv->dma_cap.l3l4fnum)
		return -EOPNOTSUPP;

	/* Park RSS while the test runs; restored in cleanup_rss below */
	if (priv->rss.enable) {
		struct stmmac_rss rss = { .enable = false, };

		stmmac_rss_configure(priv, priv->hw, &rss,
				     priv->plat->rx_queues_to_use);
	}

	dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
	if (!dissector) {
		ret = -ENOMEM;
		goto cleanup_rss;
	}

	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
	dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
	dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
	dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);

	cls = kzalloc(sizeof(*cls), GFP_KERNEL);
	if (!cls) {
		ret = -ENOMEM;
		goto cleanup_dissector;
	}

	cls->common.chain_index = 0;
	cls->command = FLOW_CLS_REPLACE;
	cls->cookie = dummy_cookie;

	rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
	if (!rule) {
		ret = -ENOMEM;
		goto cleanup_cls;
	}

	rule->match.dissector = dissector;
	rule->match.key = (void *)&keys;
	rule->match.mask = (void *)&masks;

	/* NOTE(review): ports are byte-swapped with htons() but masks are
	 * not -- callers pass 0 or ~0 so it is symmetric today.
	 */
	keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
	keys.key.src = htons(src);
	keys.key.dst = htons(dst);
	masks.mask.src = src_mask;
	masks.mask.dst = dst_mask;

	cls->rule = rule;

	rule->action.entries[0].id = FLOW_ACTION_DROP;
	rule->action.num_entries = 1;

	attr.dst = priv->dev->dev_addr;
	attr.tcp = !udp;
	attr.sport = src;
	attr.dport = dst;
	attr.ip_dst = 0;

	/* Shall receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	if (ret)
		goto cleanup_rule;

	/* Install the drop rule in hardware */
	ret = stmmac_tc_setup_cls(priv, priv, cls);
	if (ret)
		goto cleanup_rule;

	/* Shall NOT receive packet */
	ret = __stmmac_test_loopback(priv, &attr);
	ret = ret ? 0 : -EINVAL;

	cls->command = FLOW_CLS_DESTROY;
	stmmac_tc_setup_cls(priv, priv, cls);

cleanup_rule:
	kfree(rule);
cleanup_cls:
	kfree(cls);
cleanup_dissector:
	kfree(dissector);
cleanup_rss:
	/* Restore the RSS configuration saved in priv */
	if (priv->rss.enable) {
		stmmac_rss_configure(priv, priv->hw, &priv->rss,
				     priv->plat->rx_queues_to_use);
	}

	return ret;
}
#else
/* Stub when CONFIG_NET_CLS_ACT is disabled: flower offload can't be built. */
static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
				u32 dst_mask, u32 src_mask, bool udp)
{
	return -EOPNOTSUPP;
}
#endif
/* L4 filter test: TCP, match destination port 0x123 only. */
static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
{
	return __stmmac_test_l4filt(priv, 0x123, 0, ~0, 0, false);
}
/* L4 filter test: TCP, match source port 0x123 only. */
static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
{
	return __stmmac_test_l4filt(priv, 0, 0x123, 0, ~0, false);
}
/* L4 filter test: UDP, match destination port 0x123 only. */
static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
{
	return __stmmac_test_l4filt(priv, 0x123, 0, ~0, 0, true);
}
/* L4 filter test: UDP, match source port 0x123 only. */
static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
{
	return __stmmac_test_l4filt(priv, 0, 0x123, 0, ~0, true);
}
/* packet_type handler for the ARP offload test: completes the test when an
 * ARP reply addressed to the test's fake source MAC arrives. Consumes the
 * skb on every path and always returns 0.
 */
static int stmmac_test_arp_validate(struct sk_buff *skb,
				    struct net_device *ndev,
				    struct packet_type *pt,
				    struct net_device *orig_ndev)
{
	struct stmmac_test_priv *tpriv = pt->af_packet_priv;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;

	/* The reply must be addressed to the MAC we sent the request from */
	ehdr = (struct ethhdr *)skb_mac_header(skb);
	if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->src))
		goto out;

	ahdr = arp_hdr(skb);
	if (ahdr->ar_op != htons(ARPOP_REPLY))
		goto out;

	/* Wake the waiter in stmmac_test_arpoffload() */
	tpriv->ok = true;
	complete(&tpriv->comp);
out:
	kfree_skb(skb);
	return 0;
}
/* Verify hardware ARP offload: program the MAC to answer for ip_addr, send
 * an ARP request for it, and expect the hardware-generated reply addressed
 * to our fake source MAC (validated by stmmac_test_arp_validate()).
 * Returns 0 on success, negative errno on failure/timeout.
 *
 * Fix: the skb was leaked on the two error paths between allocation and
 * dev_queue_xmit() -- only a (successful or failed) xmit consumes it, so
 * free it explicitly before jumping to cleanup.
 */
static int stmmac_test_arpoffload(struct stmmac_priv *priv)
{
	unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
	unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct stmmac_packet_attrs attr = { };
	struct stmmac_test_priv *tpriv;
	struct sk_buff *skb = NULL;
	u32 ip_addr = 0xdeadcafe;
	u32 ip_src = 0xdeadbeef;
	int ret;

	if (!priv->dma_cap.arpoffsel)
		return -EOPNOTSUPP;

	tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
	if (!tpriv)
		return -ENOMEM;

	tpriv->ok = false;
	init_completion(&tpriv->comp);

	/* Capture incoming ARP frames so we can spot the offloaded reply */
	tpriv->pt.type = htons(ETH_P_ARP);
	tpriv->pt.func = stmmac_test_arp_validate;
	tpriv->pt.dev = priv->dev;
	tpriv->pt.af_packet_priv = tpriv;
	tpriv->packet = &attr;
	dev_add_pack(&tpriv->pt);

	attr.src = src;
	attr.ip_src = ip_src;
	attr.dst = dst;
	attr.ip_dst = ip_addr;

	skb = stmmac_test_get_arp_skb(priv, &attr);
	if (!skb) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
	if (ret) {
		kfree_skb(skb);
		goto cleanup;
	}

	ret = dev_set_promiscuity(priv->dev, 1);
	if (ret) {
		kfree_skb(skb);
		goto cleanup;
	}

	/* From here on the skb is owned by the stack */
	skb_set_queue_mapping(skb, 0);
	ret = dev_queue_xmit(skb);
	if (ret)
		goto cleanup_promisc;

	wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
	ret = tpriv->ok ? 0 : -ETIMEDOUT;

cleanup_promisc:
	dev_set_promiscuity(priv->dev, -1);
cleanup:
	/* Always disable the offload again and unhook the packet handler */
	stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
	dev_remove_pack(&tpriv->pt);
	kfree(tpriv);
	return ret;
}
/* Loop back a maximally-sized frame on @queue to exercise jumbo handling. */
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
	/* Only XGMAC has SW support for multiple RX descs in same packet */
	int size = priv->plat->has_xgmac ? priv->dev->max_mtu :
					   priv->dma_buf_sz;
	struct stmmac_packet_attrs attr = {
		.dst = priv->dev->dev_addr,
		.max_size = size - ETH_FCS_LEN,
		.queue_mapping = queue,
	};

	return __stmmac_test_loopback(priv, &attr);
}
/* Jumbo frame loopback on queue 0. */
static int stmmac_test_jumbo(struct stmmac_priv *priv)
{
	return __stmmac_test_jumbo(priv, 0);
}
/* Jumbo frame loopback across every TX queue; needs more than one queue. */
static int stmmac_test_mjumbo(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	if (tx_cnt <= 1)
		return -EOPNOTSUPP;

	for (chan = 0; chan < tx_cnt; chan++) {
		ret = __stmmac_test_jumbo(priv, chan);
		if (ret)
			return ret;
	}

	return 0;
}
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
......@@ -1253,6 +1670,42 @@ static const struct stmmac_test {
.name = "SVLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_svlanoff,
}, {
.name = "L3 DA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_da,
}, {
.name = "L3 SA Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l3filt_sa,
}, {
.name = "L4 DA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_tcp,
}, {
.name = "L4 SA TCP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_tcp,
}, {
.name = "L4 DA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_da_udp,
}, {
.name = "L4 SA UDP Filtering ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_l4filt_sa_udp,
}, {
.name = "ARP Offload ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_arpoffload,
}, {
.name = "Jumbo Frame ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_jumbo,
}, {
.name = "Multichannel Jumbo ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_mjumbo,
},
};
......
......@@ -242,9 +242,27 @@ static int tc_init(struct stmmac_priv *priv)
{
struct dma_features *dma_cap = &priv->dma_cap;
unsigned int count;
int i;
if (dma_cap->l3l4fnum) {
priv->flow_entries_max = dma_cap->l3l4fnum;
priv->flow_entries = devm_kcalloc(priv->device,
dma_cap->l3l4fnum,
sizeof(*priv->flow_entries),
GFP_KERNEL);
if (!priv->flow_entries)
return -ENOMEM;
for (i = 0; i < priv->flow_entries_max; i++)
priv->flow_entries[i].idx = i;
dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
priv->flow_entries_max);
}
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return -EINVAL;
return 0;
switch (dma_cap->frpbs) {
case 0x0:
......@@ -349,8 +367,235 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
return 0;
}
/* Scan the rule's actions and record the first DROP into @entry->action.
 * Rules without any action are rejected; rules with only unknown actions
 * fall through (inverse filtering).
 */
static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;

	flow_action_for_each(i, act, action) {
		if (act->id == FLOW_ACTION_DROP) {
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}
/* Record the rule's IP protocol (from the BASIC key) into @entry so that
 * the ports parser can later choose TCP vs UDP filtering.
 */
static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_match_basic match;

	/* No BASIC key present -> nothing this parser can record */
	if (!dissector_uses_key(rule->match.dissector,
				FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);

	entry->ip_proto = match.key->ip_proto;
	return 0;
}
/* Program IPv4 source/destination address filters for the rule into the
 * hardware entry @entry. Addresses whose masked value is zero are skipped.
 */
static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 addr;
	int ret;

	/* No IPv4 address key present -> nothing this parser can program */
	if (!dissector_uses_key(rule->match.dissector,
				FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);

	addr = ntohl(match.key->src) & ntohl(match.mask->src);
	if (addr) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, addr);
		if (ret)
			return ret;
	}

	addr = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (addr) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, addr);
		if (ret)
			return ret;
	}

	return 0;
}
/* Program L4 source/destination port filters for the rule into the hardware
 * entry @entry. Requires the BASIC parser to have recorded TCP or UDP first.
 */
static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 port;
	bool udp;
	int ret;

	/* No PORTS key present -> nothing this parser can program */
	if (!dissector_uses_key(rule->match.dissector,
				FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	/* Only TCP and UDP are supported by the L4 filters */
	if (entry->ip_proto == IPPROTO_TCP)
		udp = false;
	else if (entry->ip_proto == IPPROTO_UDP)
		udp = true;
	else
		return -EINVAL;

	flow_rule_match_ports(rule, &match);

	port = ntohs(match.key->src) & ntohs(match.mask->src);
	if (port) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      udp, true, inv, port);
		if (ret)
			return ret;
	}

	port = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (port) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      udp, false, inv, port);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}
/* Look up the flow entry bound to @cls->cookie; with @get_free also accept
 * the first unused entry. Returns NULL when nothing matches.
 */
static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	struct stmmac_flow_entry *entry;
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && !entry->in_use)
			return entry;
	}

	return NULL;
}
/* Flow parsers tried in order by tc_add_flow(); each returns 0 when it was
 * able to program its portion of the rule.
 *
 * Fix: the table is file-local and never modified, so give it internal
 * linkage and const-qualify it instead of exporting a mutable global.
 */
static const struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};
/* FLOW_CLS_REPLACE handler: bind (or re-use) a hardware entry for the rule
 * and run every parser over it. The entry is considered installed if at
 * least one parser succeeded.
 *
 * NOTE(review): a parser failing after an earlier one already programmed HW
 * leaves that partial state in place -- confirm this is acceptable.
 */
static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	/* Not seen before: grab the first free hardware entry */
	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry);
	if (ret)
		return ret;

	/* Any parser that succeeds marks the entry as in use */
	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret) {
			entry->in_use = true;
			continue;
		}
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}
/* FLOW_CLS_DESTROY handler: disable whichever hardware filter the entry
 * programmed and release the entry for re-use.
 */
static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4)
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx,
					      false, false, false, false, 0);
	else
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx,
					      false, false, false, false, 0);

	/* Release the entry even if the hardware disable failed */
	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}
/* Dispatch a flower classifier command to the add/delete handlers. */
static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return tc_add_flow(priv, cls);
	case FLOW_CLS_DESTROY:
		return tc_del_flow(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}
/* TC offload callbacks for DWMAC 5.10+ cores (u32, CBS and flower). */
const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment