Commit bd954b82 authored by Vladimir Oltean, committed by Jakub Kicinski

net: dsa: move tagging protocol code to tag.{c,h}

It would be nice if tagging protocol drivers could include just the
header they need, since they are (mostly) data path code and are
isolated from most of what the rest of the DSA core does.

Create a tag.c and a tag.h file which are meant to support tagging
protocol drivers.
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 09f92341
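A hedged sketch (not part of this commit) of what the split enables: after this change, a tagging protocol driver can get by with the new tag.h header alone. The "foo" tagger name, the reuse of DSA_TAG_PROTO_NONE as a placeholder protocol ID, and the empty xmit/rcv bodies are illustrative assumptions, not code from the commit.

#include <linux/module.h>
#include <linux/skbuff.h>

#include "tag.h"	/* previously this would have pulled in all of "dsa_priv.h" */

#define FOO_NAME "foo"

/* On TX, a real tagger would push its header here, e.g. with dsa_alloc_etype_header(). */
static struct sk_buff *foo_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	return skb;
}

/* On RX, a real tagger would parse its header, set skb->dev to the user port
 * (dsa_master_find_slave()) and strip the header (dsa_strip_etype_header()).
 */
static struct sk_buff *foo_tag_rcv(struct sk_buff *skb, struct net_device *dev)
{
	return skb;
}

static const struct dsa_device_ops foo_netdev_ops = {
	.name	= FOO_NAME,
	.proto	= DSA_TAG_PROTO_NONE,	/* placeholder; a real tagger has its own ID */
	.xmit	= foo_tag_xmit,
	.rcv	= foo_tag_rcv,
};

MODULE_LICENSE("GPL");
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_NONE, FOO_NAME);

module_dsa_tag_driver(foo_netdev_ops);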
@@ -10,6 +10,7 @@ dsa_core-y += \
port.o \
slave.o \
switch.o \
tag.o \
tag_8021q.o
# tagging formats
...
@@ -10,127 +10,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>
#include <net/dst_metadata.h>
#include "dsa_priv.h" #include "dsa_priv.h"
#include "slave.h" #include "slave.h"
#include "tag.h"
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
struct module *owner)
{
dsa_tag_driver->owner = owner;
mutex_lock(&dsa_tag_drivers_lock);
list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
mutex_unlock(&dsa_tag_drivers_lock);
}
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count, struct module *owner)
{
unsigned int i;
for (i = 0; i < count; i++)
dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
mutex_lock(&dsa_tag_drivers_lock);
list_del(&dsa_tag_driver->list);
mutex_unlock(&dsa_tag_drivers_lock);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
return ops->name;
};
/* Function takes a reference on the module owning the tagger,
* so dsa_tag_driver_put must be called afterwards.
*/
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
{
const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
struct dsa_tag_driver *dsa_tag_driver;
request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
const struct dsa_device_ops *tmp = dsa_tag_driver->ops;
if (strcmp(name, tmp->name))
continue;
if (!try_module_get(dsa_tag_driver->owner))
break;
ops = tmp;
break;
}
mutex_unlock(&dsa_tag_drivers_lock);
return ops;
}
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
{
struct dsa_tag_driver *dsa_tag_driver;
const struct dsa_device_ops *ops;
bool found = false;
request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
ops = dsa_tag_driver->ops;
if (ops->proto == tag_protocol) {
found = true;
break;
}
}
if (found) {
if (!try_module_get(dsa_tag_driver->owner))
ops = ERR_PTR(-ENOPROTOOPT);
} else {
ops = ERR_PTR(-ENOPROTOOPT);
}
mutex_unlock(&dsa_tag_drivers_lock);
return ops;
}
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
struct dsa_tag_driver *dsa_tag_driver;
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
if (dsa_tag_driver->ops == ops) {
module_put(dsa_tag_driver->owner);
break;
}
}
mutex_unlock(&dsa_tag_drivers_lock);
}
static int dev_is_class(struct device *dev, void *class)
{
@@ -168,111 +51,6 @@ struct net_device *dsa_dev_to_net_device(struct device *dev)
return NULL;
}
/* Determine if we should defer delivery of skb until we have a rx timestamp.
*
* Called from dsa_switch_rcv. For now, this will only work if tagging is
* enabled on the switch. Normally the MAC driver would retrieve the hardware
* timestamp when it reads the packet out of the hardware. However in a DSA
* switch, the DSA driver owning the interface to which the packet is
* delivered is never notified unless we do so here.
*/
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
unsigned int type;
if (skb_headroom(skb) < ETH_HLEN)
return false;
__skb_push(skb, ETH_HLEN);
type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
if (type == PTP_CLASS_NONE)
return false;
if (likely(ds->ops->port_rxtstamp))
return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
return false;
}
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
struct dsa_slave_priv *p;
if (unlikely(!cpu_dp)) {
kfree_skb(skb);
return 0;
}
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
return 0;
if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
unsigned int port = md_dst->u.port_info.port_id;
skb_dst_drop(skb);
if (!skb_has_extensions(skb))
skb->slow_gro = 0;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (likely(skb->dev)) {
dsa_default_offload_fwd_mark(skb);
nskb = skb;
}
} else {
nskb = cpu_dp->rcv(skb, dev);
}
if (!nskb) {
kfree_skb(skb);
return 0;
}
skb = nskb;
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
if (unlikely(!dsa_slave_dev_check(skb->dev))) {
/* Packet is to be injected directly on an upper
* device, e.g. a team/bond, so skip all DSA-port
* specific actions.
*/
netif_rx(skb);
return 0;
}
p = netdev_priv(skb->dev);
if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
nskb = dsa_untag_bridge_pvid(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
}
skb = nskb;
}
dev_sw_netstats_rx_add(skb->dev, skb->len);
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
gro_cells_receive(&p->gcells, skb);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
@@ -327,11 +105,6 @@ int dsa_switch_resume(struct dsa_switch *ds)
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif
static struct packet_type dsa_pack_type __read_mostly = {
.type = cpu_to_be16(ETH_P_XDSA),
.func = dsa_switch_rcv,
};
static struct workqueue_struct *dsa_owq;
bool dsa_schedule_work(struct work_struct *work)
...
@@ -22,6 +22,7 @@
#include "master.h"
#include "port.h"
#include "slave.h"
#include "tag.h"
static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);
...
@@ -11,85 +11,8 @@
#include <linux/netdevice.h>
#include <net/dsa.h>
#include "slave.h"
#define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
/* Create 2 modaliases per tagging protocol, one to auto-load the module
* given the ID reported by get_tag_protocol(), and the other by name.
*/
#define DSA_TAG_DRIVER_ALIAS "dsa_tag:"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto, __name) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __name); \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS "id-" \
__stringify(__proto##_VALUE))
struct dsa_tag_driver {
const struct dsa_device_ops *ops;
struct list_head list;
struct module *owner;
};
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count,
struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count);
#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
static int __init dsa_tag_driver_module_init(void) \
{ \
dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
THIS_MODULE); \
return 0; \
} \
module_init(dsa_tag_driver_module_init); \
\
static void __exit dsa_tag_driver_module_exit(void) \
{ \
dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
} \
module_exit(dsa_tag_driver_module_exit)
/**
* module_dsa_tag_drivers() - Helper macro for registering DSA tag
* drivers
* @__ops_array: Array of tag driver structures
*
* Helper macro for DSA tag drivers which do not do anything special
* in module init/exit. Each module may only use this macro once, and
* calling it replaces module_init() and module_exit().
*/
#define module_dsa_tag_drivers(__ops_array) \
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
/* Create a static structure from which we can build a linked list of dsa_tag
* drivers
*/
#define DSA_TAG_DRIVER(__ops) \
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
.ops = &__ops, \
}
/**
* module_dsa_tag_driver() - Helper macro for registering a single DSA tag
* driver
* @__ops: Single tag driver structure
*
* Helper macro for DSA tag drivers which do not do anything special
* in module init/exit. Each module may only use this macro once, and
* calling it replaces module_init() and module_exit().
*/
#define module_dsa_tag_driver(__ops) \
DSA_TAG_DRIVER(__ops); \
\
static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
&DSA_TAG_DRIVER_NAME(__ops) \
}; \
module_dsa_tag_drivers(dsa_tag_driver_array)
enum {
DSA_NOTIFIER_AGEING_TIME,
DSA_NOTIFIER_BRIDGE_JOIN,
@@ -223,234 +146,15 @@ struct dsa_standalone_event_work {
};
/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol);
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
struct net_device *dsa_dev_to_net_device(struct device *dev);
bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b);
bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
return ops->needed_headroom + ops->needed_tailroom;
}
static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
int device, int port)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds->index == device && dp->index == port &&
dp->type == DSA_PORT_TYPE_USER)
return dp->slave;
return NULL;
}
/* netlink.c */
extern struct rtnl_link_ops dsa_link_ops __read_mostly;
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
* frames as untagged, since the bridge will not untag them.
*/
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *dev = skb->dev;
struct net_device *upper_dev;
u16 vid, pvid, proto;
int err;
if (!br || br_vlan_enabled(br))
return skb;
err = br_vlan_get_proto(br, &proto);
if (err)
return skb;
/* Move VLAN tag from data to hwaccel */
if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
skb = skb_vlan_untag(skb);
if (!skb)
return NULL;
}
if (!skb_vlan_tag_present(skb))
return skb;
vid = skb_vlan_tag_get_id(skb);
/* We already run under an RCU read-side critical section since
* we are called from netif_receive_skb_list_internal().
*/
err = br_vlan_get_pvid_rcu(dev, &pvid);
if (err)
return skb;
if (vid != pvid)
return skb;
/* The sad part about attempting to untag from DSA is that we
* don't know, unless we check, if the skb will end up in
* the bridge's data path - br_allowed_ingress() - or not.
* For example, there might be an 8021q upper for the
* default_pvid of the bridge, which will steal VLAN-tagged traffic
* from the bridge's data path. This is a configuration that DSA
* supports because vlan_filtering is 0. In that case, we should
* definitely keep the tag, to make sure it keeps working.
*/
upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
if (upper_dev)
return skb;
__vlan_hwaccel_clear_tag(skb);
return skb;
}
/* For switches without hardware support for DSA tagging to be able
* to support termination through the bridge.
*/
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct bridge_vlan_info vinfo;
struct net_device *slave;
struct dsa_port *dp;
int err;
list_for_each_entry(dp, &dst->ports, list) {
if (dp->type != DSA_PORT_TYPE_USER)
continue;
if (!dp->bridge)
continue;
if (dp->stp_state != BR_STATE_LEARNING &&
dp->stp_state != BR_STATE_FORWARDING)
continue;
/* Since the bridge might learn this packet, keep the CPU port
* affinity with the port that will be used for the reply on
* xmit.
*/
if (dp->cpu_dp != cpu_dp)
continue;
slave = dp->slave;
err = br_vlan_get_info_rcu(slave, vid, &vinfo);
if (err)
continue;
return slave;
}
return NULL;
}
/* If the ingress port offloads the bridge, we mark the frame as autonomously
* forwarded by hardware, so the software bridge doesn't forward it twice, back
* to us, because we already did. However, if we're in fallback mode and we do
* software bridging, we are not offloading it, therefore the dp->bridge
* pointer is not populated, and flooding needs to be done by software (we are
* effectively operating in standalone ports mode).
*/
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
skb->offload_fwd_mark = !!(dp->bridge);
}
/* Helper for removing DSA header tags from packets in the RX path.
* Must not be called before skb_pull(len).
* skb->data
* |
* v
* | | | | | | | | | | | | | | | | | | |
* +-----------------------+-----------------------+---------------+-------+
* | Destination MAC | Source MAC | DSA header | EType |
* +-----------------------+-----------------------+---------------+-------+
* | |
* <----- len -----> <----- len ----->
* |
* >>>>>>> v
* >>>>>>> | | | | | | | | | | | | | | |
* >>>>>>> +-----------------------+-----------------------+-------+
* >>>>>>> | Destination MAC | Source MAC | EType |
* +-----------------------+-----------------------+-------+
* ^
* |
* skb->data
*/
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
/* Helper for creating space for DSA header tags in TX path packets.
* Must not be called before skb_push(len).
*
* Before:
*
* <<<<<<< | | | | | | | | | | | | | | |
* ^ <<<<<<< +-----------------------+-----------------------+-------+
* | <<<<<<< | Destination MAC | Source MAC | EType |
* | +-----------------------+-----------------------+-------+
* <----- len ----->
* |
* |
* skb->data
*
* After:
*
* | | | | | | | | | | | | | | | | | | |
* +-----------------------+-----------------------+---------------+-------+
* | Destination MAC | Source MAC | DSA header | EType |
* +-----------------------+-----------------------+---------------+-------+
* ^ | |
* | <----- len ----->
* skb->data
*/
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
* skb_mac_header(skb), which leaves skb->data pointing at the first byte after
* what the DSA master perceives as the EtherType (the beginning of the L3
* protocol). Since DSA EtherType header taggers treat the EtherType as part of
* the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
* is located 2 bytes behind skb->data. Note that EtherType in this context
* means the first 2 bytes of the DSA header, not the encapsulated EtherType
* that will become visible after the DSA header is stripped.
*/
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
return skb->data - 2;
}
/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
* header taggers start exactly where the EtherType is (the EtherType is
* treated as part of the DSA header).
*/
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
return skb->data + 2 * ETH_ALEN;
}
/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
...
@@ -14,6 +14,7 @@
#include "dsa_priv.h"
#include "master.h"
#include "port.h"
#include "tag.h"
static int dsa_master_get_regs_len(struct net_device *dev)
{
...
@@ -26,6 +26,7 @@
#include "port.h"
#include "master.h"
#include "slave.h"
#include "tag.h"
static void dsa_slave_standalone_event_work(struct work_struct *work)
{
...
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DSA tagging protocol handling
*
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
* Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
*/
#include <linux/netdevice.h>
#include <linux/ptp_classify.h>
#include <linux/skbuff.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include "slave.h"
#include "tag.h"
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);
/* Determine if we should defer delivery of skb until we have a rx timestamp.
*
* Called from dsa_switch_rcv. For now, this will only work if tagging is
* enabled on the switch. Normally the MAC driver would retrieve the hardware
* timestamp when it reads the packet out of the hardware. However in a DSA
* switch, the DSA driver owning the interface to which the packet is
* delivered is never notified unless we do so here.
*/
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
unsigned int type;
if (skb_headroom(skb) < ETH_HLEN)
return false;
__skb_push(skb, ETH_HLEN);
type = ptp_classify_raw(skb);
__skb_pull(skb, ETH_HLEN);
if (type == PTP_CLASS_NONE)
return false;
if (likely(ds->ops->port_rxtstamp))
return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
return false;
}
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *unused)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
struct dsa_slave_priv *p;
if (unlikely(!cpu_dp)) {
kfree_skb(skb);
return 0;
}
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
return 0;
if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
unsigned int port = md_dst->u.port_info.port_id;
skb_dst_drop(skb);
if (!skb_has_extensions(skb))
skb->slow_gro = 0;
skb->dev = dsa_master_find_slave(dev, 0, port);
if (likely(skb->dev)) {
dsa_default_offload_fwd_mark(skb);
nskb = skb;
}
} else {
nskb = cpu_dp->rcv(skb, dev);
}
if (!nskb) {
kfree_skb(skb);
return 0;
}
skb = nskb;
skb_push(skb, ETH_HLEN);
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
if (unlikely(!dsa_slave_dev_check(skb->dev))) {
/* Packet is to be injected directly on an upper
* device, e.g. a team/bond, so skip all DSA-port
* specific actions.
*/
netif_rx(skb);
return 0;
}
p = netdev_priv(skb->dev);
if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
nskb = dsa_untag_bridge_pvid(skb);
if (!nskb) {
kfree_skb(skb);
return 0;
}
skb = nskb;
}
dev_sw_netstats_rx_add(skb->dev, skb->len);
if (dsa_skb_defer_rx_timestamp(p, skb))
return 0;
gro_cells_receive(&p->gcells, skb);
return 0;
}
struct packet_type dsa_pack_type __read_mostly = {
.type = cpu_to_be16(ETH_P_XDSA),
.func = dsa_switch_rcv,
};
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
struct module *owner)
{
dsa_tag_driver->owner = owner;
mutex_lock(&dsa_tag_drivers_lock);
list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
mutex_unlock(&dsa_tag_drivers_lock);
}
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count, struct module *owner)
{
unsigned int i;
for (i = 0; i < count; i++)
dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
mutex_lock(&dsa_tag_drivers_lock);
list_del(&dsa_tag_driver->list);
mutex_unlock(&dsa_tag_drivers_lock);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count)
{
unsigned int i;
for (i = 0; i < count; i++)
dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
return ops->name;
};
/* Function takes a reference on the module owning the tagger,
* so dsa_tag_driver_put must be called afterwards.
*/
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
{
const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
struct dsa_tag_driver *dsa_tag_driver;
request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
const struct dsa_device_ops *tmp = dsa_tag_driver->ops;
if (strcmp(name, tmp->name))
continue;
if (!try_module_get(dsa_tag_driver->owner))
break;
ops = tmp;
break;
}
mutex_unlock(&dsa_tag_drivers_lock);
return ops;
}
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
{
struct dsa_tag_driver *dsa_tag_driver;
const struct dsa_device_ops *ops;
bool found = false;
request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
ops = dsa_tag_driver->ops;
if (ops->proto == tag_protocol) {
found = true;
break;
}
}
if (found) {
if (!try_module_get(dsa_tag_driver->owner))
ops = ERR_PTR(-ENOPROTOOPT);
} else {
ops = ERR_PTR(-ENOPROTOOPT);
}
mutex_unlock(&dsa_tag_drivers_lock);
return ops;
}
void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
struct dsa_tag_driver *dsa_tag_driver;
mutex_lock(&dsa_tag_drivers_lock);
list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
if (dsa_tag_driver->ops == ops) {
module_put(dsa_tag_driver->owner);
break;
}
}
mutex_unlock(&dsa_tag_drivers_lock);
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __DSA_TAG_H
#define __DSA_TAG_H
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/types.h>
#include <net/dsa.h>
#include "port.h"
#include "slave.h"
struct dsa_tag_driver {
const struct dsa_device_ops *ops;
struct list_head list;
struct module *owner;
};
extern struct packet_type dsa_pack_type;
const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol);
const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
return ops->needed_headroom + ops->needed_tailroom;
}
static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
int device, int port)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds->index == device && dp->index == port &&
dp->type == DSA_PORT_TYPE_USER)
return dp->slave;
return NULL;
}
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
* frames as untagged, since the bridge will not untag them.
*/
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *dev = skb->dev;
struct net_device *upper_dev;
u16 vid, pvid, proto;
int err;
if (!br || br_vlan_enabled(br))
return skb;
err = br_vlan_get_proto(br, &proto);
if (err)
return skb;
/* Move VLAN tag from data to hwaccel */
if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
skb = skb_vlan_untag(skb);
if (!skb)
return NULL;
}
if (!skb_vlan_tag_present(skb))
return skb;
vid = skb_vlan_tag_get_id(skb);
/* We already run under an RCU read-side critical section since
* we are called from netif_receive_skb_list_internal().
*/
err = br_vlan_get_pvid_rcu(dev, &pvid);
if (err)
return skb;
if (vid != pvid)
return skb;
/* The sad part about attempting to untag from DSA is that we
* don't know, unless we check, if the skb will end up in
* the bridge's data path - br_allowed_ingress() - or not.
* For example, there might be an 8021q upper for the
* default_pvid of the bridge, which will steal VLAN-tagged traffic
* from the bridge's data path. This is a configuration that DSA
* supports because vlan_filtering is 0. In that case, we should
* definitely keep the tag, to make sure it keeps working.
*/
upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
if (upper_dev)
return skb;
__vlan_hwaccel_clear_tag(skb);
return skb;
}
/* For switches without hardware support for DSA tagging to be able
* to support termination through the bridge.
*/
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
struct dsa_port *cpu_dp = master->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct bridge_vlan_info vinfo;
struct net_device *slave;
struct dsa_port *dp;
int err;
list_for_each_entry(dp, &dst->ports, list) {
if (dp->type != DSA_PORT_TYPE_USER)
continue;
if (!dp->bridge)
continue;
if (dp->stp_state != BR_STATE_LEARNING &&
dp->stp_state != BR_STATE_FORWARDING)
continue;
/* Since the bridge might learn this packet, keep the CPU port
* affinity with the port that will be used for the reply on
* xmit.
*/
if (dp->cpu_dp != cpu_dp)
continue;
slave = dp->slave;
err = br_vlan_get_info_rcu(slave, vid, &vinfo);
if (err)
continue;
return slave;
}
return NULL;
}
/* If the ingress port offloads the bridge, we mark the frame as autonomously
* forwarded by hardware, so the software bridge doesn't forward it twice, back
* to us, because we already did. However, if we're in fallback mode and we do
* software bridging, we are not offloading it, therefore the dp->bridge
* pointer is not populated, and flooding needs to be done by software (we are
* effectively operating in standalone ports mode).
*/
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
skb->offload_fwd_mark = !!(dp->bridge);
}
/* Helper for removing DSA header tags from packets in the RX path.
* Must not be called before skb_pull(len).
* skb->data
* |
* v
* | | | | | | | | | | | | | | | | | | |
* +-----------------------+-----------------------+---------------+-------+
* | Destination MAC | Source MAC | DSA header | EType |
* +-----------------------+-----------------------+---------------+-------+
* | |
* <----- len -----> <----- len ----->
* |
* >>>>>>> v
* >>>>>>> | | | | | | | | | | | | | | |
* >>>>>>> +-----------------------+-----------------------+-------+
* >>>>>>> | Destination MAC | Source MAC | EType |
* +-----------------------+-----------------------+-------+
* ^
* |
* skb->data
*/
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
/* Helper for creating space for DSA header tags in TX path packets.
* Must not be called before skb_push(len).
*
* Before:
*
* <<<<<<< | | | | | | | | | | | | | | |
* ^ <<<<<<< +-----------------------+-----------------------+-------+
* | <<<<<<< | Destination MAC | Source MAC | EType |
* | +-----------------------+-----------------------+-------+
* <----- len ----->
* |
* |
* skb->data
*
* After:
*
* | | | | | | | | | | | | | | | | | | |
* +-----------------------+-----------------------+---------------+-------+
* | Destination MAC | Source MAC | DSA header | EType |
* +-----------------------+-----------------------+---------------+-------+
* ^ | |
* | <----- len ----->
* skb->data
*/
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
* skb_mac_header(skb), which leaves skb->data pointing at the first byte after
* what the DSA master perceives as the EtherType (the beginning of the L3
* protocol). Since DSA EtherType header taggers treat the EtherType as part of
* the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
* is located 2 bytes behind skb->data. Note that EtherType in this context
* means the first 2 bytes of the DSA header, not the encapsulated EtherType
* that will become visible after the DSA header is stripped.
*/
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
return skb->data - 2;
}
/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
* header taggers start exactly where the EtherType is (the EtherType is
* treated as part of the DSA header).
*/
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
return skb->data + 2 * ETH_ALEN;
}
/* Create 2 modaliases per tagging protocol, one to auto-load the module
* given the ID reported by get_tag_protocol(), and the other by name.
*/
#define DSA_TAG_DRIVER_ALIAS "dsa_tag:"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto, __name) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __name); \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS "id-" \
__stringify(__proto##_VALUE))
void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count,
struct module *owner);
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
unsigned int count);
#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
static int __init dsa_tag_driver_module_init(void) \
{ \
dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
THIS_MODULE); \
return 0; \
} \
module_init(dsa_tag_driver_module_init); \
\
static void __exit dsa_tag_driver_module_exit(void) \
{ \
dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
} \
module_exit(dsa_tag_driver_module_exit)
/**
* module_dsa_tag_drivers() - Helper macro for registering DSA tag
* drivers
* @__ops_array: Array of tag driver structures
*
* Helper macro for DSA tag drivers which do not do anything special
* in module init/exit. Each module may only use this macro once, and
* calling it replaces module_init() and module_exit().
*/
#define module_dsa_tag_drivers(__ops_array) \
dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
/* Create a static structure from which we can build a linked list of dsa_tag
* drivers
*/
#define DSA_TAG_DRIVER(__ops) \
static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
.ops = &__ops, \
}
/**
* module_dsa_tag_driver() - Helper macro for registering a single DSA tag
* driver
* @__ops: Single tag driver structure
*
* Helper macro for DSA tag drivers which do not do anything special
* in module init/exit. Each module may only use this macro once, and
* calling it replaces module_init() and module_exit().
*/
#define module_dsa_tag_driver(__ops) \
DSA_TAG_DRIVER(__ops); \
\
static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
&DSA_TAG_DRIVER_NAME(__ops) \
}; \
module_dsa_tag_drivers(dsa_tag_driver_array)
#endif
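For reference, a hedged sketch of how a caller inside the DSA core might consume the declarations above; the example_bind_tagger() name and the "ocelot" tagger string are assumptions for illustration, and error handling is kept minimal.

#include <linux/err.h>
#include <linux/printk.h>

#include "tag.h"

static int example_bind_tagger(const char *name)
{
	const struct dsa_device_ops *ops;

	/* May trigger request_module("dsa_tag:<name>") if the tagger is modular */
	ops = dsa_tag_driver_get_by_name(name);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	pr_info("using tagger %s, %d bytes of tag overhead\n",
		dsa_tag_protocol_to_str(ops),
		dsa_tag_protocol_overhead(ops));

	/* The getter took a reference on the tagger module; drop it when done */
	dsa_tag_driver_put(ops);

	return 0;
}

/* e.g. example_bind_tagger("ocelot"); */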
@@ -9,6 +9,7 @@
#include "dsa_priv.h"
#include "port.h"
#include "tag.h"
/* Binary structure of the fake 12-bit VID field (when the TPID is
* ETH_P_DSA_8021Q):
...
@@ -7,7 +7,7 @@
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include "dsa_priv.h"
#include "tag.h"
#define AR9331_NAME "ar9331"
...
@@ -10,7 +10,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#include "tag.h"
#define BRCM_NAME "brcm"
#define BRCM_LEGACY_NAME "brcm-legacy"
...
@@ -50,7 +50,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#include "tag.h"
#define DSA_NAME "dsa"
#define EDSA_NAME "edsa"
...
@@ -10,7 +10,7 @@
#include <linux/skbuff.h>
#include <net/dsa.h>
#include "dsa_priv.h"
#include "tag.h"
#define GSWIP_NAME "gswip"
...
@@ -12,6 +12,7 @@
#include <net/dsa.h>
#include "dsa_priv.h"
#include "tag.h"
#define HELLCREEK_NAME "hellcreek"
...
@@ -7,7 +7,8 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <net/dsa.h>
#include "dsa_priv.h"
#include "tag.h"
#define KSZ8795_NAME "ksz8795"
#define KSZ9477_NAME "ksz9477"
...
@@ -7,7 +7,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#include "tag.h"
/* To define the outgoing port and to discover the incoming port a regular
* VLAN tag is used by the LAN9303. But its VID meaning is 'special':
...
@@ -8,7 +8,7 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "dsa_priv.h"
#include "tag.h"
#define MTK_NAME "mtk"
...
@@ -8,7 +8,7 @@
* tagging support, look at tag_8021q.c instead.
*/
#include "dsa_priv.h"
#include "tag.h"
#define NONE_NAME "none"
...
@@ -2,7 +2,8 @@
/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
#include "tag.h"
#define OCELOT_NAME "ocelot"
#define SEVILLE_NAME "seville"
...
@@ -10,7 +10,8 @@
*/
#include <linux/dsa/8021q.h>
#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
#include "tag.h"
#define OCELOT_8021Q_NAME "ocelot-8021q"
...
@@ -8,7 +8,7 @@
#include <net/dsa.h>
#include <linux/dsa/tag_qca.h>
#include "dsa_priv.h"
#include "tag.h"
#define QCA_NAME "qca"
...
@@ -18,7 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/bits.h>
#include "dsa_priv.h"
#include "tag.h"
#define RTL4_A_NAME "rtl4a"
...
@@ -77,7 +77,7 @@
#include <linux/bits.h>
#include <linux/etherdevice.h>
#include "dsa_priv.h"
#include "tag.h"
/* Protocols supported:
*
...
@@ -10,7 +10,7 @@
#include <linux/if_ether.h>
#include <net/dsa.h>
#include "dsa_priv.h"
#include "tag.h"
/* To define the outgoing port and to discover the incoming port a TAG is
* inserted after Src MAC :
...
@@ -5,7 +5,8 @@
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
#include <linux/packing.h>
#include "dsa_priv.h"
#include "tag.h"
#define SJA1105_NAME "sja1105"
#define SJA1110_NAME "sja1110"
...
@@ -8,7 +8,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#include "tag.h"
#define TRAILER_NAME "trailer"
...
@@ -7,7 +7,7 @@
#include <linux/bitops.h>
#include "dsa_priv.h"
#include "tag.h"
#define XRS700X_NAME "xrs700x"
...