Commit a68578c2 authored by Vladimir Oltean, committed by David S. Miller

net: dsa: Make deferred_xmit private to sja1105

There are 3 things that are wrong with the DSA deferred xmit mechanism:

1. Its introduction has made the DSA hotpath ever so slightly more
   inefficient for everybody, since DSA_SKB_CB(skb)->deferred_xmit needs
   to be initialized to false for every transmitted frame, in order to
   figure out whether the driver requested deferral or not (a very rare
   occasion, rare even for the only driver that does use this mechanism:
   sja1105). That was necessary to avoid kfree_skb from freeing the skb.

2. Because L2 PTP is a link-local protocol like STP, it requires
   management routes and deferred xmit with this switch. But as opposed
   to STP, the deferred work mechanism needs to schedule the packet
   rather quickly for the TX timestamp to be collected in time and sent
   to user space. But there is no provision for controlling the
   scheduling priority of this deferred xmit workqueue. Too bad this is
   a rather specific requirement for a feature that nobody else uses
   (more below).

3. Perhaps most importantly, it makes the DSA core adhere a bit too
   much to the NXP company-wide policy "Innovate Where It Doesn't
   Matter". The sja1105 is probably the only DSA switch that requires
   some frames sent from the CPU to be routed to the slave port via an
   out-of-band configuration (register write) rather than in-band (DSA
   tag). And there are indeed very good reasons to not want to do that:
   if that out-of-band register is at the other end of a slow bus such
   as SPI, then you limit that Ethernet flow's throughput to effectively
   the throughput of the SPI bus. So hardware vendors should definitely
   not be encouraged to design this way. We do _not_ want more
   widespread use of this mechanism.

Luckily we have a solution for each of the 3 issues:

For 1, we can just remove that variable in the skb->cb and counteract
the effect of kfree_skb with skb_get, much to the same effect. The
advantage, of course, being that anybody who doesn't use deferred xmit
doesn't need to do any extra operation in the hotpath.
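
As a minimal sketch of that idea (illustrative names, not the literal
patch), the refcounting works like this:

    #include <linux/skbuff.h>

    /* The tagger bumps skb->users before queuing the packet for
     * deferred transmission, so the caller's now-unconditional
     * kfree_skb() merely drops the extra reference.
     */
    static struct sk_buff *defer_xmit_sketch(struct sk_buff_head *queue,
                                             struct sk_buff *skb)
    {
            skb_queue_tail(queue, skb_get(skb)); /* users: 1 -> 2 */
            return NULL; /* caller runs kfree_skb(skb): 2 -> 1 */
    }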

For 2, we can create a kernel thread for each port's deferred xmit work.
If the user switch ports are named swp0, swp1, swp2, the kernel threads
will be named swp0_xmit, swp1_xmit, swp2_xmit (there appears to be a 15
character length limit on kernel thread names). With this, the user can
change the scheduling priority with chrt $(pidof swp2_xmit).
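
A minimal sketch of the per-port worker pattern (assumed helper name;
the real code is in the sja1105_probe hunk below):

    #include <linux/kthread.h>

    /* One ordinary kernel thread per user port, named after the slave
     * netdev ("swp2" -> "swp2_xmit"), so it appears in the process list
     * and can be reprioritized from user space with chrt.
     */
    static struct kthread_worker *create_xmit_worker(const char *ifname)
    {
            return kthread_create_worker(0, "%s_xmit", ifname);
    }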

For 3, we can actually move the entire implementation to the sja1105
driver.

So this patch deletes the generic implementation from the DSA core and
adds a new one, more adequate to the requirements of PTP TX
timestamping, in sja1105_main.c.
Suggested-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0a51826c
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1732,6 +1732,16 @@ static int sja1105_setup(struct dsa_switch *ds)
 static void sja1105_teardown(struct dsa_switch *ds)
 {
 	struct sja1105_private *priv = ds->priv;
+	int port;
+
+	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+		struct sja1105_port *sp = &priv->ports[port];
+
+		if (!dsa_is_user_port(ds, port))
+			continue;
+
+		kthread_destroy_worker(sp->xmit_worker);
+	}
 
 	sja1105_tas_teardown(ds);
 	sja1105_ptp_clock_unregister(ds);
@@ -1753,6 +1763,18 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port,
 	return 0;
 }
 
+static void sja1105_port_disable(struct dsa_switch *ds, int port)
+{
+	struct sja1105_private *priv = ds->priv;
+	struct sja1105_port *sp = &priv->ports[port];
+
+	if (!dsa_is_user_port(ds, port))
+		return;
+
+	kthread_cancel_work_sync(&sp->xmit_work);
+	skb_queue_purge(&sp->xmit_queue);
+}
+
 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
 			     struct sk_buff *skb, bool takets)
 {
@@ -1811,31 +1833,36 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
 	return NETDEV_TX_OK;
 }
 
+#define work_to_port(work) \
+		container_of((work), struct sja1105_port, xmit_work)
+#define tagger_to_sja1105(t) \
+		container_of((t), struct sja1105_private, tagger_data)
+
 /* Deferred work is unfortunately necessary because setting up the management
  * route cannot be done from atomic context (SPI transfer takes a sleepable
  * lock on the bus)
  */
-static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
-					      struct sk_buff *skb)
+static void sja1105_port_deferred_xmit(struct kthread_work *work)
 {
-	struct sja1105_private *priv = ds->priv;
-	struct sk_buff *clone;
+	struct sja1105_port *sp = work_to_port(work);
+	struct sja1105_tagger_data *tagger_data = sp->data;
+	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
+	int port = sp - priv->ports;
+	struct sk_buff *skb;
 
-	mutex_lock(&priv->mgmt_lock);
+	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
+		struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
 
-	/* The clone, if there, was made by dsa_skb_tx_timestamp */
-	clone = DSA_SKB_CB(skb)->clone;
+		mutex_lock(&priv->mgmt_lock);
 
-	sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
+		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
 
-	if (!clone)
-		goto out;
+		/* The clone, if there, was made by dsa_skb_tx_timestamp */
+		if (clone)
+			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
 
-	sja1105_ptp_txtstamp_skb(ds, port, clone);
-
-out:
-	mutex_unlock(&priv->mgmt_lock);
-
-	return NETDEV_TX_OK;
+		mutex_unlock(&priv->mgmt_lock);
+	}
 }
 
 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -1966,6 +1993,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
 	.get_sset_count		= sja1105_get_sset_count,
 	.get_ts_info		= sja1105_get_ts_info,
 	.port_enable		= sja1105_port_enable,
+	.port_disable		= sja1105_port_disable,
 	.port_fdb_dump		= sja1105_fdb_dump,
 	.port_fdb_add		= sja1105_fdb_add,
 	.port_fdb_del		= sja1105_fdb_del,
@@ -1979,7 +2007,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
 	.port_mdb_prepare	= sja1105_mdb_prepare,
 	.port_mdb_add		= sja1105_mdb_add,
 	.port_mdb_del		= sja1105_mdb_del,
-	.port_deferred_xmit	= sja1105_port_deferred_xmit,
 	.port_hwtstamp_get	= sja1105_hwtstamp_get,
 	.port_hwtstamp_set	= sja1105_hwtstamp_set,
 	.port_rxtstamp		= sja1105_port_rxtstamp,
@@ -2031,7 +2058,7 @@ static int sja1105_probe(struct spi_device *spi)
 	struct device *dev = &spi->dev;
 	struct sja1105_private *priv;
 	struct dsa_switch *ds;
-	int rc, i;
+	int rc, port;
 
 	if (!dev->of_node) {
 		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
@@ -2096,15 +2123,42 @@ static int sja1105_probe(struct spi_device *spi)
 		return rc;
 
 	/* Connections between dsa_port and sja1105_port */
-	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
-		struct sja1105_port *sp = &priv->ports[i];
+	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+		struct sja1105_port *sp = &priv->ports[port];
+		struct dsa_port *dp = dsa_to_port(ds, port);
+		struct net_device *slave;
+
+		if (!dsa_is_user_port(ds, port))
+			continue;
 
-		dsa_to_port(ds, i)->priv = sp;
-		sp->dp = dsa_to_port(ds, i);
+		dp->priv = sp;
+		sp->dp = dp;
 		sp->data = tagger_data;
+		slave = dp->slave;
+		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
+		sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
+							slave->name);
+		if (IS_ERR(sp->xmit_worker)) {
+			rc = PTR_ERR(sp->xmit_worker);
+			dev_err(ds->dev,
+				"failed to create deferred xmit thread: %d\n",
+				rc);
+			goto out;
+		}
+		skb_queue_head_init(&sp->xmit_queue);
 	}
 
 	return 0;
+
+out:
+	while (port-- > 0) {
+		struct sja1105_port *sp = &priv->ports[port];
+
+		if (!dsa_is_user_port(ds, port))
+			continue;
+
+		kthread_destroy_worker(sp->xmit_worker);
+	}
+	return rc;
 }
 
 static int sja1105_remove(struct spi_device *spi)
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -53,6 +53,9 @@ struct sja1105_skb_cb {
 	((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
 
 struct sja1105_port {
+	struct kthread_worker *xmit_worker;
+	struct kthread_work xmit_work;
+	struct sk_buff_head xmit_queue;
 	struct sja1105_tagger_data *data;
 	struct dsa_port *dp;
 	bool hwts_tx_en;
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -90,7 +90,6 @@ struct dsa_device_ops {
 
 struct dsa_skb_cb {
 	struct sk_buff *clone;
-	bool deferred_xmit;
 };
 
 struct __dsa_skb_cb {
@@ -192,9 +191,6 @@ struct dsa_port {
 	struct phylink *pl;
 	struct phylink_config pl_config;
 
-	struct work_struct xmit_work;
-	struct sk_buff_head xmit_queue;
-
 	struct list_head list;
 
 	/*
@@ -564,11 +560,6 @@ struct dsa_switch_ops {
 	bool	(*port_rxtstamp)(struct dsa_switch *ds, int port,
 				 struct sk_buff *skb, unsigned int type);
 
-	/*
-	 * Deferred frame Tx
-	 */
-	netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
-					  struct sk_buff *skb);
-
 	/* Devlink parameters */
 	int	(*devlink_param_get)(struct dsa_switch *ds, u32 id,
 				     struct devlink_param_gset_ctx *ctx);
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -162,8 +162,6 @@ int dsa_slave_resume(struct net_device *slave_dev);
 int dsa_slave_register_notifier(void);
 void dsa_slave_unregister_notifier(void);
 
-void *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev);
-
 static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -116,9 +116,6 @@ static int dsa_slave_close(struct net_device *dev)
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 
-	cancel_work_sync(&dp->xmit_work);
-	skb_queue_purge(&dp->xmit_queue);
-
 	phylink_stop(dp->pl);
 	dsa_port_disable(dp);
@@ -518,7 +515,6 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 	s->tx_bytes += skb->len;
 	u64_stats_update_end(&s->syncp);
 
-	DSA_SKB_CB(skb)->deferred_xmit = false;
 	DSA_SKB_CB(skb)->clone = NULL;
 
 	/* Identify PTP protocol packets, clone them, and pass them to the
@@ -531,7 +527,6 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	nskb = p->xmit(skb, dev);
 	if (!nskb) {
-		if (!DSA_SKB_CB(skb)->deferred_xmit)
-			kfree_skb(skb);
+		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -539,31 +534,6 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 	return dsa_enqueue_skb(nskb, dev);
 }
 
-void *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-
-	DSA_SKB_CB(skb)->deferred_xmit = true;
-
-	skb_queue_tail(&dp->xmit_queue, skb);
-	schedule_work(&dp->xmit_work);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(dsa_defer_xmit);
-
-static void dsa_port_xmit_work(struct work_struct *work)
-{
-	struct dsa_port *dp = container_of(work, struct dsa_port, xmit_work);
-	struct dsa_switch *ds = dp->ds;
-	struct sk_buff *skb;
-
-	if (unlikely(!ds->ops->port_deferred_xmit))
-		return;
-
-	while ((skb = skb_dequeue(&dp->xmit_queue)) != NULL)
-		ds->ops->port_deferred_xmit(ds, dp->index, skb);
-}
-
 /* ethtool operations *******************************************************/
 static void dsa_slave_get_drvinfo(struct net_device *dev,
@@ -1367,9 +1337,6 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 	if (!netif_running(slave_dev))
 		return 0;
 
-	cancel_work_sync(&dp->xmit_work);
-	skb_queue_purge(&dp->xmit_queue);
-
 	netif_device_detach(slave_dev);
 
 	rtnl_lock();
@@ -1455,8 +1422,6 @@ int dsa_slave_create(struct dsa_port *port)
 	}
 
 	p->dp = port;
 	INIT_LIST_HEAD(&p->mall_tc_list);
-	INIT_WORK(&port->xmit_work, dsa_port_xmit_work);
-	skb_queue_head_init(&port->xmit_queue);
 	p->xmit = cpu_dp->tag_ops->xmit;
 	port->slave = slave_dev;
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -83,6 +83,19 @@ static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
 	return false;
 }
 
+/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
+static struct sk_buff *sja1105_defer_xmit(struct sja1105_port *sp,
+					  struct sk_buff *skb)
+{
+	/* Increase refcount so the kfree_skb in dsa_slave_xmit
+	 * won't really free the packet.
+	 */
+	skb_queue_tail(&sp->xmit_queue, skb_get(skb));
+	kthread_queue_work(sp->xmit_worker, &sp->xmit_work);
+
+	return NULL;
+}
+
 static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
@@ -97,7 +110,7 @@ static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
 	 * is the .port_deferred_xmit driver callback.
 	 */
 	if (unlikely(sja1105_is_link_local(skb)))
-		return dsa_defer_xmit(skb, netdev);
+		return sja1105_defer_xmit(dp->priv, skb);
 
 	/* If we are under a vlan_filtering bridge, IP termination on
 	 * switch ports based on 802.1Q tags is simply too brittle to