Commit f79bebad authored by David S. Miller

Merge branch 'am65-cpsw-nuss-switchdev-driver'

Vignesh Raghavendra says:

====================
net: ti: am65-cpsw-nuss: Add switchdev driver

This series adds switchdev support to the AM65 CPSW NUSS driver to support the
multi-port CPSW present on J721e and AM64 SoCs.
It adds a devlink hook to switch between switch mode and multi-mac mode.

v2:
Rebased on latest net-next
Update patch 1/4 with rationale for using devlink
====================
parents b4e18b29 e276cfb9
...@@ -49,6 +49,7 @@ Contents:
stmicro/stmmac
ti/cpsw
ti/cpsw_switchdev
ti/am65_nuss_cpsw_switchdev
ti/tlan
toshiba/spider_net
......
.. SPDX-License-Identifier: GPL-2.0
===================================================================
Texas Instruments K3 AM65 CPSW NUSS switchdev based ethernet driver
===================================================================
:Version: 1.0
Port renaming
=============
In order to rename via udev::
ip -d link show dev sw0p1 | grep switchid
SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}==<switchid>, \
ATTR{phys_port_name}!="", NAME="sw0$attr{phys_port_name}"
Multi mac mode
==============
- The driver operates in multi-mac mode by default, thus
working as N individual network interfaces.
Devlink configuration parameters
================================
See Documentation/networking/devlink/am65-nuss-cpsw-switch.rst
Enabling "switch"
=================
Switch mode can be enabled by setting the devlink driver parameter
"switch_mode" to 1/true::
devlink dev param set platform/c000000.ethernet \
name switch_mode value true cmode runtime
This can be done regardless of the state of the port netdevs (UP/DOWN), but the
port netdevs have to be UP before being added to the bridge, to avoid
overwriting the bridge configuration, as the CPSW switch driver completely
reloads its configuration when the first port changes its state to UP.
When both interfaces have joined the bridge, the CPSW switch driver will enable
marking packets with the offload_fwd_mark flag.
All configuration is implemented via the switchdev API.
Bridge setup
============
::
devlink dev param set platform/c000000.ethernet \
name switch_mode value true cmode runtime
ip link add name br0 type bridge
ip link set dev br0 type bridge ageing_time 1000
ip link set dev sw0p1 up
ip link set dev sw0p2 up
ip link set dev sw0p1 master br0
ip link set dev sw0p2 master br0
[*] bridge vlan add dev br0 vid 1 pvid untagged self
[*] if vlan_filtering=1, where default_pvid=1
Note. Steps [*] are mandatory.
On/off STP
==========
::
ip link set dev BRDEV type bridge stp_state 1/0
VLAN configuration
==================
::
bridge vlan add dev br0 vid 1 pvid untagged self <---- add cpu port to VLAN 1
Note. This step is mandatory for bridge/default_pvid.
Add extra VLANs
===============
1. untagged::
bridge vlan add dev sw0p1 vid 100 pvid untagged master
bridge vlan add dev sw0p2 vid 100 pvid untagged master
bridge vlan add dev br0 vid 100 pvid untagged self <---- Add cpu port to VLAN100
2. tagged::
bridge vlan add dev sw0p1 vid 100 master
bridge vlan add dev sw0p2 vid 100 master
bridge vlan add dev br0 vid 100 pvid tagged self <---- Add cpu port to VLAN100
FDBs
----
FDBs are automatically added on the appropriate switch port upon detection.
Manually adding FDBs::
bridge fdb add aa:bb:cc:dd:ee:ff dev sw0p1 master vlan 100
bridge fdb add aa:bb:cc:dd:ee:fe dev sw0p2 master <---- Add on all VLANs
MDBs
----
MDBs are automatically added on the appropriate switch port upon detection.
Manually adding MDBs::
bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent vid 100
bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent <---- Add on all VLANs
Multicast flooding
==================
CPU port mcast_flooding is always on.
Turning flooding on/off on switch ports::
bridge link set dev sw0p1 mcast_flood on/off
Access and Trunk port
=====================
::
bridge vlan add dev sw0p1 vid 100 pvid untagged master
bridge vlan add dev sw0p2 vid 100 master
bridge vlan add dev br0 vid 100 self
ip link add link br0 name br0.100 type vlan id 100
Note. Setting PVID on Bridge device itself works only for
default VLAN (default_pvid).
.. SPDX-License-Identifier: GPL-2.0
==============================
am65-cpsw-nuss devlink support
==============================
This document describes the devlink features implemented by the ``am65-cpsw-nuss``
device driver.
Parameters
==========
The ``am65-cpsw-nuss`` driver implements the following driver-specific
parameters.
.. list-table:: Driver-specific parameters implemented
:widths: 5 5 5 85
* - Name
- Type
- Mode
- Description
* - ``switch_mode``
- Boolean
- runtime
- Enable switch mode
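As an illustration (assuming the same device instance name used in the
switchdev documentation above; the actual name depends on the platform), the
current mode can be read back with::
devlink dev param show platform/c000000.ethernet name switch_mode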
...@@ -45,3 +45,4 @@ parameters, info versions, and other features it supports.
sja1105
qed
ti-cpsw-switch
am65-nuss-cpsw-switch
...@@ -92,6 +92,7 @@ config TI_CPTS
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
...@@ -105,6 +106,15 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
config TI_K3_AM65_CPSW_SWITCHDEV
bool "TI K3 AM654x/J721E CPSW Switch mode support"
depends on TI_K3_AM65_CPSW_NUSS
depends on NET_SWITCHDEV
help
This enables switchdev support for the TI K3 CPSWxG Ethernet
Switch. Enable this to add hardware switching support to the AM65
CPSW NUSS driver.
config TI_K3_AM65_CPTS
tristate "TI K3 AM65x CPTS"
depends on ARCH_K3 && OF
......
...@@ -26,4 +26,5 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
...@@ -31,6 +31,7 @@
#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"
...@@ -228,6 +229,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
u32 port_mask, unreg_mcast = 0;
int ret;
if (!common->is_emac_mode)
return 0;
if (!netif_running(ndev) || !vid)
return 0;
...@@ -255,6 +259,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
if (!common->is_emac_mode)
return 0;
if (!netif_running(ndev) || !vid)
return 0;
...@@ -277,6 +284,11 @@ static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
{
struct am65_cpsw_common *common = port->common;
if (promisc && !common->is_emac_mode) {
dev_dbg(common->dev, "promisc mode requested in switch mode");
return;
}
if (promisc) {
/* Enable promiscuous mode */
cpsw_ale_control_set(common->ale, port->port_id,
...@@ -408,6 +420,11 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}
static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
netdev_features_t features)
{
...@@ -454,9 +471,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
if (AM65_CPSW_IS_CPSW2G(common))
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_NOLEARN, 1);
/* switch to vlan unaware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
...@@ -470,6 +484,11 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
port_mask, port_mask,
port_mask & ~ALE_PORT_HOST);
if (common->is_emac_mode)
am65_cpsw_init_host_port_emac(common);
else
am65_cpsw_init_host_port_switch(common);
for (i = 0; i < common->rx_chns.descs_num; i++) {
skb = __netdev_alloc_skb_ip_align(NULL,
AM65_CPSW_MAX_PACKET_SIZE,
...@@ -598,7 +617,6 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 port_mask;
int ret, i;
ret = pm_runtime_get_sync(common->dev);
...@@ -631,19 +649,10 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
if (port->slave.mac_only) {
/* enable mac-only mode on port */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_MACONLY, 1);
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_NOLEARN, 1);
}
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
HOST_PORT_NUM, ALE_SECURE, 0);
cpsw_ale_add_mcast(common->ale, ndev->broadcast,
port_mask, 0, 0, ALE_MCAST_FWD_2);
if (common->is_emac_mode)
am65_cpsw_init_port_emac_ale(port);
else
am65_cpsw_init_port_switch_ale(port);
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
...@@ -803,12 +812,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
if (new_skb) {
ndev_priv = netdev_priv(ndev);
am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
napi_gro_receive(&common->napi_rx, skb);
ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
...@@ -1451,6 +1461,13 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
return &port->devlink_port;
}
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
...@@ -1464,6 +1481,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
...@@ -2031,6 +2049,441 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
}
}
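/* offload_fwd_mark is set on every port netdev only when all enabled
 * external ports are members of the same HW bridge, so the software bridge
 * does not re-forward packets already forwarded by the switch hardware.
 */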
static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
{
int set_val = 0;
int i;
if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
set_val = 1;
dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
for (i = 1; i <= common->port_num; i++) {
struct am65_cpsw_port *port = am65_common_get_port(common, i);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
priv->offload_fwd_mark = set_val;
}
}
bool am65_cpsw_port_dev_check(const struct net_device *ndev)
{
if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
return !common->is_emac_mode;
}
return false;
}
static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
if (!common->br_members) {
common->hw_bridge_dev = br_ndev;
} else {
/* This is adding the port to a second bridge, this is
* unsupported
*/
if (common->hw_bridge_dev != br_ndev)
return -EOPNOTSUPP;
}
common->br_members |= BIT(priv->port->port_id);
am65_cpsw_port_offload_fwd_mark_update(common);
return NOTIFY_DONE;
}
static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
common->br_members &= ~BIT(priv->port->port_id);
am65_cpsw_port_offload_fwd_mark_update(common);
if (!common->br_members)
common->hw_bridge_dev = NULL;
}
/* netdev notifier */
static int am65_cpsw_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
int ret = NOTIFY_DONE;
if (!am65_cpsw_port_dev_check(ndev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_CHANGEUPPER:
info = ptr;
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev);
else
am65_cpsw_netdevice_port_unlink(ndev);
}
break;
default:
return NOTIFY_DONE;
}
return notifier_from_errno(ret);
}
static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
{
int ret = 0;
if (AM65_CPSW_IS_CPSW2G(cpsw) ||
!IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
return 0;
cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
if (ret) {
dev_err(cpsw->dev, "can't register netdevice notifier\n");
return ret;
}
ret = am65_cpsw_switchdev_register_notifiers(cpsw);
if (ret)
unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
return ret;
}
static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
{
if (AM65_CPSW_IS_CPSW2G(cpsw) ||
!IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
return;
am65_cpsw_switchdev_unregister_notifiers(cpsw);
unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
}
static const struct devlink_ops am65_cpsw_devlink_ops = {};
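/* Add an ALE multicast entry for the STP destination MAC so BPDUs are
 * delivered to the host port even while the external port is in
 * blocking/learning state.
 */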
static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
{
cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
ALE_MCAST_BLOCK_LEARN_FWD);
}
static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host = am65_common_get_host(common);
writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
am65_cpsw_init_stp_ale_entry(common);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
}
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host = am65_common_get_host(common);
writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
/* learning makes no sense in multi-mac mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
}
static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *common = dl_priv->common;
dev_dbg(common->dev, "%s id:%u\n", __func__, id);
if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
return -EOPNOTSUPP;
ctx->val.vbool = !common->is_emac_mode;
return 0;
}
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
{
struct am65_cpsw_slave_data *slave = &port->slave;
struct am65_cpsw_common *common = port->common;
u32 port_mask;
writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
if (slave->mac_only)
/* enable mac-only mode on port */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_MACONLY, 1);
cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
}
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
{
struct am65_cpsw_slave_data *slave = &port->slave;
struct am65_cpsw_common *cpsw = port->common;
u32 port_mask;
cpsw_ale_control_set(cpsw->ale, port->port_id,
ALE_PORT_NOLEARN, 0);
cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
slave->port_vlan);
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan,
ALE_MCAST_FWD_2);
writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
cpsw_ale_control_set(cpsw->ale, port->port_id,
ALE_PORT_MACONLY, 0);
}
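/* Toggle between multi-mac (EMAC) and switch mode. Leaving switch mode is
 * refused while ports are still bridged. If all port netdevs are down, only
 * the per-port VLAN is updated; otherwise the ALE table is cleared under
 * BYPASS and the host port plus every running port are reprogrammed for the
 * new mode.
 */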
static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *cpsw = dl_priv->common;
bool switch_en = ctx->val.vbool;
bool if_running = false;
int i;
dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
return -EOPNOTSUPP;
if (switch_en == !cpsw->is_emac_mode)
return 0;
if (!switch_en && cpsw->br_members) {
dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
return -EINVAL;
}
rtnl_lock();
cpsw->is_emac_mode = !switch_en;
for (i = 0; i < cpsw->port_num; i++) {
struct net_device *sl_ndev = cpsw->ports[i].ndev;
if (!sl_ndev || !netif_running(sl_ndev))
continue;
if_running = true;
}
if (!if_running) {
/* all ndevs are down */
for (i = 0; i < cpsw->port_num; i++) {
struct net_device *sl_ndev = cpsw->ports[i].ndev;
struct am65_cpsw_slave_data *slave;
if (!sl_ndev)
continue;
slave = am65_ndev_to_slave(sl_ndev);
if (switch_en)
slave->port_vlan = cpsw->default_vlan;
else
slave->port_vlan = 0;
}
goto exit;
}
cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
/* clean up ALE table */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
if (switch_en) {
dev_info(cpsw->dev, "Enable switch mode\n");
am65_cpsw_init_host_port_switch(cpsw);
for (i = 0; i < cpsw->port_num; i++) {
struct net_device *sl_ndev = cpsw->ports[i].ndev;
struct am65_cpsw_slave_data *slave;
struct am65_cpsw_port *port;
if (!sl_ndev)
continue;
port = am65_ndev_to_port(sl_ndev);
slave = am65_ndev_to_slave(sl_ndev);
slave->port_vlan = cpsw->default_vlan;
if (netif_running(sl_ndev))
am65_cpsw_init_port_switch_ale(port);
}
} else {
dev_info(cpsw->dev, "Disable switch mode\n");
am65_cpsw_init_host_port_emac(cpsw);
for (i = 0; i < cpsw->port_num; i++) {
struct net_device *sl_ndev = cpsw->ports[i].ndev;
struct am65_cpsw_port *port;
if (!sl_ndev)
continue;
port = am65_ndev_to_port(sl_ndev);
port->slave.port_vlan = 0;
if (netif_running(sl_ndev))
am65_cpsw_init_port_emac_ale(port);
}
}
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
exit:
rtnl_unlock();
return 0;
}
static const struct devlink_param am65_cpsw_devlink_params[] = {
DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
am65_cpsw_dl_switch_mode_get,
am65_cpsw_dl_switch_mode_set, NULL),
};
static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
{
struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int i;
for (i = 1; i <= common->port_num; i++) {
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
if (dl_port->registered)
devlink_port_unregister(dl_port);
}
}
static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
{
struct devlink_port_attrs attrs = {};
struct am65_cpsw_devlink *dl_priv;
struct device *dev = common->dev;
struct devlink_port *dl_port;
struct am65_cpsw_port *port;
int ret = 0;
int i;
common->devlink =
devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv));
if (!common->devlink)
return -ENOMEM;
dl_priv = devlink_priv(common->devlink);
dl_priv->common = common;
ret = devlink_register(common->devlink, dev);
if (ret) {
dev_err(dev, "devlink reg fail ret:%d\n", ret);
goto dl_free;
}
/* Provide devlink hook to switch mode when multiple external ports
* are present and the NUSS switchdev driver is enabled.
*/
if (!AM65_CPSW_IS_CPSW2G(common) &&
IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
ret = devlink_params_register(common->devlink,
am65_cpsw_devlink_params,
ARRAY_SIZE(am65_cpsw_devlink_params));
if (ret) {
dev_err(dev, "devlink params reg fail ret:%d\n", ret);
goto dl_unreg;
}
devlink_params_publish(common->devlink);
}
for (i = 1; i <= common->port_num; i++) {
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = port->port_id;
attrs.switch_id.id_len = sizeof(resource_size_t);
memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
devlink_port_attrs_set(dl_port, &attrs);
ret = devlink_port_register(common->devlink, dl_port, port->port_id);
if (ret) {
dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
port->port_id, ret);
goto dl_port_unreg;
}
devlink_port_type_eth_set(dl_port, port->ndev);
}
return ret;
dl_port_unreg:
am65_cpsw_unregister_devlink_ports(common);
dl_unreg:
devlink_unregister(common->devlink);
dl_free:
devlink_free(common->devlink);
return ret;
}
static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
{
if (!AM65_CPSW_IS_CPSW2G(common) &&
IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
devlink_params_unpublish(common->devlink);
devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
ARRAY_SIZE(am65_cpsw_devlink_params));
}
am65_cpsw_unregister_devlink_ports(common);
devlink_unregister(common->devlink);
devlink_free(common->devlink);
}
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
...@@ -2064,14 +2517,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
}
}
ret = am65_cpsw_register_notifiers(common);
if (ret)
goto err_cleanup_ndev;
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
goto clean_unregister_notifiers;
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
return 0;
return 0;
clean_unregister_notifiers:
am65_cpsw_unregister_notifiers(common);
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
return ret;
}
...@@ -2151,6 +2614,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
u64 id_temp;
int ret, i;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
...@@ -2170,6 +2634,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (IS_ERR(common->ss_base))
return PTR_ERR(common->ss_base);
common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
/* Use device's physical base address as switch id */
id_temp = cpu_to_be64(res->start);
memcpy(common->switch_id, &id_temp, sizeof(res->start));
node = of_get_child_by_name(dev->of_node, "ethernet-ports");
if (!node)
...@@ -2183,6 +2650,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
sizeof(*common->ports),
...@@ -2262,6 +2730,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
dev_set_drvdata(dev, common);
common->is_emac_mode = true;
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
goto err_of_clear;
...@@ -2295,6 +2765,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return ret;
}
am65_cpsw_unregister_devlink(common);
am65_cpsw_unregister_notifiers(common);
/* must unregister ndevs here because DD release_driver routine calls
* dma_deconfigure(dev) before devres_release_all(dev)
*/
......
...@@ -6,12 +6,14 @@
#ifndef AM65_CPSW_NUSS_H_
#define AM65_CPSW_NUSS_H_
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
#include "am65-cpsw-qos.h" #include "am65-cpsw-qos.h"
struct am65_cpts; struct am65_cpts;
...@@ -22,6 +24,8 @@ struct am65_cpts; ...@@ -22,6 +24,8 @@ struct am65_cpts;
#define AM65_CPSW_MAX_RX_QUEUES 1 #define AM65_CPSW_MAX_RX_QUEUES 1
#define AM65_CPSW_MAX_RX_FLOWS 1 #define AM65_CPSW_MAX_RX_FLOWS 1
#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
...@@ -32,6 +36,7 @@ struct am65_cpsw_slave_data {
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
int port_vlan;
};
struct am65_cpsw_port {
...@@ -47,6 +52,7 @@ struct am65_cpsw_port {
bool tx_ts_enabled;
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
};
struct am65_cpsw_host {
...@@ -85,6 +91,15 @@ struct am65_cpsw_pdata {
const char *ale_dev_id;
};
enum cpsw_devlink_param_id {
AM65_CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
AM65_CPSW_DL_PARAM_SWITCH_MODE,
};
struct am65_cpsw_devlink {
struct am65_cpsw_common *common;
};
struct am65_cpsw_common {
struct device *dev;
struct device *mdio_dev;
...@@ -117,6 +132,14 @@ struct am65_cpsw_common {
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
bool is_emac_mode;
u16 br_members;
int default_vlan;
struct devlink *devlink;
struct net_device *hw_bridge_dev;
struct notifier_block am65_cpsw_netdevice_nb;
unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
};
struct am65_cpsw_ndev_stats {
...@@ -131,6 +154,7 @@ struct am65_cpsw_ndev_priv {
u32 msg_enable;
struct am65_cpsw_port *port;
struct am65_cpsw_ndev_stats __percpu *stats;
bool offload_fwd_mark;
};
#define am65_ndev_to_priv(ndev) \
...@@ -158,4 +182,6 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
bool am65_cpsw_port_dev_check(const struct net_device *dev);
#endif /* AM65_CPSW_NUSS_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
*
* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
*
*/
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>
#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "cpsw_ale.h"
struct am65_cpsw_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct am65_cpsw_port *port;
unsigned long event;
};
static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
{
struct am65_cpsw_common *cpsw = port->common;
u8 cpsw_state;
int ret = 0;
switch (state) {
case BR_STATE_FORWARDING:
cpsw_state = ALE_PORT_STATE_FORWARD;
break;
case BR_STATE_LEARNING:
cpsw_state = ALE_PORT_STATE_LEARN;
break;
case BR_STATE_DISABLED:
cpsw_state = ALE_PORT_STATE_DISABLE;
break;
case BR_STATE_LISTENING:
case BR_STATE_BLOCKING:
cpsw_state = ALE_PORT_STATE_BLOCK;
break;
default:
return -EOPNOTSUPP;
}
ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
ALE_PORT_STATE, cpsw_state);
netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);
return ret;
}
static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
struct net_device *orig_dev,
unsigned long brport_flags)
{
struct am65_cpsw_common *cpsw = port->common;
bool unreg_mcast_add = false;
if (brport_flags & BR_MCAST_FLOOD)
unreg_mcast_add = true;
netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
unreg_mcast_add, port->port_id);
cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
unreg_mcast_add);
return 0;
}
static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
unsigned long flags)
{
if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int am65_cpsw_port_attr_set(struct net_device *ndev,
const struct switchdev_attr *attr)
{
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
attr->u.brport_flags);
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
{
struct am65_cpsw_common *cpsw = port->common;
struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
u32 pvid;
if (port->port_id)
pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
else
pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
pvid = pvid & 0xfff;
return pvid;
}
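/* Port VLAN register layout: VID in bits 11:0, CFI in bit 12, PCP/COS in
 * bits 15:13; port_id 0 selects the host port register.
 */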
static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
{
struct am65_cpsw_common *cpsw = port->common;
struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
u32 pvid;
pvid = vid;
pvid |= cfi ? BIT(12) : 0;
pvid |= (cos & 0x7) << 13;
if (port->port_id)
writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
else
writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
}
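/* Program a VLAN membership entry. Requests on the bridge device itself
 * target the host (CPU) port, otherwise the external port; registered
 * multicast is enabled only if the underlying device has IFF_MULTICAST.
 */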
static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
u16 vid, struct net_device *orig_dev)
{
bool cpu_port = netif_is_bridge_master(orig_dev);
struct am65_cpsw_common *cpsw = port->common;
int unreg_mcast_mask = 0;
int reg_mcast_mask = 0;
int untag_mask = 0;
int port_mask;
int ret = 0;
u32 flags;
if (cpu_port) {
port_mask = BIT(HOST_PORT_NUM);
flags = orig_dev->flags;
unreg_mcast_mask = port_mask;
} else {
port_mask = BIT(port->port_id);
flags = port->ndev->flags;
}
if (flags & IFF_MULTICAST)
reg_mcast_mask = port_mask;
if (untag)
untag_mask = port_mask;
ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
reg_mcast_mask, unreg_mcast_mask);
if (ret) {
netdev_err(port->ndev, "Unable to add vlan\n");
return ret;
}
if (cpu_port)
cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
if (!pvid)
return ret;
am65_cpsw_set_pvid(port, vid, 0, 0);
netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
port->ndev->name, vid, port_mask);
return ret;
}
static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
struct net_device *orig_dev)
{
bool cpu_port = netif_is_bridge_master(orig_dev);
struct am65_cpsw_common *cpsw = port->common;
int port_mask;
int ret = 0;
if (cpu_port)
port_mask = BIT(HOST_PORT_NUM);
else
port_mask = BIT(port->port_id);
ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
if (ret != 0)
return ret;
/* We don't care for the return value here, error is returned only if
* the unicast entry is not present
*/
if (cpu_port)
cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
if (vid == am65_cpsw_get_pvid(port))
am65_cpsw_set_pvid(port, 0, 0, 0);
/* We don't care for the return value here, error is returned only if
* the multicast entry is not present
*/
cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
ALE_VLAN, vid);
netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
port->ndev->name, vid, port_mask);
return ret;
}
static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
const struct switchdev_obj_port_vlan *vlan)
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
port->ndev->name, vlan->vid, vlan->flags);
if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
return 0;
return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}
static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
const struct switchdev_obj_port_vlan *vlan)
{
return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
}
static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
struct switchdev_obj_port_mdb *mdb)
{
struct net_device *orig_dev = mdb->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
struct am65_cpsw_common *cpsw = port->common;
int port_mask;
int err;
if (cpu_port)
port_mask = BIT(HOST_PORT_NUM);
else
port_mask = BIT(port->port_id);
err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
ALE_VLAN, mdb->vid, 0);
netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
port->ndev->name, mdb->vid, mdb->addr, port_mask);
return err;
}
static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
struct switchdev_obj_port_mdb *mdb)
{
struct net_device *orig_dev = mdb->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
struct am65_cpsw_common *cpsw = port->common;
int del_mask;
if (cpu_port)
del_mask = BIT(HOST_PORT_NUM);
else
del_mask = BIT(port->port_id);
/* Ignore error as error code is returned only when entry is already removed */
cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
ALE_VLAN, mdb->vid);
netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
port->ndev->name, mdb->vid, mdb->addr, del_mask);
return 0;
}
static int am65_cpsw_port_obj_add(struct net_device *ndev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int err = 0;
netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = am65_cpsw_port_vlans_add(port, vlan);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
err = am65_cpsw_port_mdb_add(port, mdb);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int am65_cpsw_port_obj_del(struct net_device *ndev,
const struct switchdev_obj *obj)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int err = 0;
netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = am65_cpsw_port_vlans_del(port, vlan);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
err = am65_cpsw_port_mdb_del(port, mdb);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
struct switchdev_notifier_fdb_info *rcv)
{
struct switchdev_notifier_fdb_info info;
info.addr = rcv->addr;
info.vid = rcv->vid;
info.offloaded = true;
call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
ndev, &info.info, NULL);
}
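/* Deferred from the atomic switchdev notifier: program user-added FDB
 * entries into the ALE under rtnl_lock. Entries matching the port's own MAC
 * address are installed on the host port instead of the external port.
 */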
static void am65_cpsw_switchdev_event_work(struct work_struct *work)
{
struct am65_cpsw_switchdev_event_work *switchdev_work =
container_of(work, struct am65_cpsw_switchdev_event_work, work);
struct am65_cpsw_port *port = switchdev_work->port;
struct switchdev_notifier_fdb_info *fdb;
struct am65_cpsw_common *cpsw = port->common;
int port_id = port->port_id;
rtnl_lock();
switch (switchdev_work->event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
fdb = &switchdev_work->fdb_info;
netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port_id);
if (!fdb->added_by_user)
break;
if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port_id = HOST_PORT_NUM;
cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
fdb->vid ? ALE_VLAN : 0, fdb->vid);
am65_cpsw_fdb_offload_notify(port->ndev, fdb);
break;
case SWITCHDEV_FDB_DEL_TO_DEVICE:
fdb = &switchdev_work->fdb_info;
netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
fdb->addr, fdb->vid, fdb->added_by_user,
fdb->offloaded, port_id);
if (!fdb->added_by_user)
break;
if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
port_id = HOST_PORT_NUM;
cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
fdb->vid ? ALE_VLAN : 0, fdb->vid);
break;
default:
break;
}
rtnl_unlock();
kfree(switchdev_work->fdb_info.addr);
kfree(switchdev_work);
dev_put(port->ndev);
}
/* called under rcu_read_lock() */
static int am65_cpsw_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
struct am65_cpsw_switchdev_event_work *switchdev_work;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct switchdev_notifier_fdb_info *fdb_info = ptr;
int err;
if (event == SWITCHDEV_PORT_ATTR_SET) {
err = switchdev_handle_port_attr_set(ndev, ptr,
am65_cpsw_port_dev_check,
am65_cpsw_port_attr_set);
return notifier_from_errno(err);
}
if (!am65_cpsw_port_dev_check(ndev))
return NOTIFY_DONE;
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (WARN_ON(!switchdev_work))
return NOTIFY_BAD;
INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
switchdev_work->port = port;
switchdev_work->event = event;
switch (event) {
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
if (!switchdev_work->fdb_info.addr)
goto err_addr_alloc;
ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
fdb_info->addr);
dev_hold(ndev);
break;
default:
kfree(switchdev_work);
return NOTIFY_DONE;
}
queue_work(system_long_wq, &switchdev_work->work);
return NOTIFY_DONE;
err_addr_alloc:
kfree(switchdev_work);
return NOTIFY_BAD;
}
static struct notifier_block cpsw_switchdev_notifier = {
.notifier_call = am65_cpsw_switchdev_event,
};
static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add(dev, ptr,
am65_cpsw_port_dev_check,
am65_cpsw_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del(dev, ptr,
am65_cpsw_port_dev_check,
am65_cpsw_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
am65_cpsw_port_dev_check,
am65_cpsw_port_attr_set);
return notifier_from_errno(err);
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block cpsw_switchdev_bl_notifier = {
.notifier_call = am65_cpsw_switchdev_blocking_event,
};
int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
{
int ret = 0;
ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
if (ret) {
dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
ret);
return ret;
}
ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
if (ret) {
dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
ret);
unregister_switchdev_notifier(&cpsw_switchdev_notifier);
}
return ret;
}
void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
{
unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
unregister_switchdev_notifier(&cpsw_switchdev_notifier);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
#define DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
#include <linux/skbuff.h>
#if IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)
static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
{
skb->offload_fwd_mark = val;
}
int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw);
void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw);
#else
static inline int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
{
return -EOPNOTSUPP;
}
static inline void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
{
}
static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
{
}
#endif
#endif /* DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_ */