Commit d38049bb authored by Vladimir Oltean, committed by David S. Miller

net: dsa: sja1105: bring deferred xmit implementation in line with ocelot-8021q

When the ocelot-8021q driver was converted to deferred xmit as part of
commit 8d5f7954 ("net: dsa: felix: break at first CPU port during
init and teardown"), the deferred implementation was deliberately made
subtly different from what sja1105 has.

The implementation differences lay in the following observations:

- There might be a race between these two lines in tag_sja1105.c:

       skb_queue_tail(&sp->xmit_queue, skb_get(skb));
       kthread_queue_work(sp->xmit_worker, &sp->xmit_work);

  and the skb dequeue logic in sja1105_port_deferred_xmit(). For
  example, the xmit_work might already be queued, yet the work item has
  just finished walking through the skb queue. Because we don't check
  the return code from kthread_queue_work(), we do nothing when the
  work item is already queued.

  However, nobody will take that skb and send it, at least until the
  next timestampable skb is sent. This creates additional (and
  avoidable) TX timestamping latency.

  To close that race, the ocelot-8021q driver does not keep a single
  work item per port and an skb timestamping queue; instead, it
  dynamically allocates one work item per packet (see the sketch below
  this summary).

- It is also unnecessary to have more than one kthread that does the
  work. So delete the per-port kthread allocations and replace them with
  a single kthread which is global to the switch.

This change brings the two implementations in line by applying those
observations to the sja1105 driver as well.
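
For reference, a condensed, self-contained sketch of the per-packet
deferral pattern described above. The names (struct xmit_defer_work,
defer_one_packet) are illustrative, not the driver's identifiers, and
error handling is trimmed to the essentials:

    #include <linux/kthread.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>

    struct xmit_defer_work {
            struct sk_buff *skb;      /* exactly one packet per work item */
            struct kthread_work work; /* embedded; recovered via container_of() */
    };

    static void xmit_defer_work_fn(struct kthread_work *work)
    {
            struct xmit_defer_work *xw =
                    container_of(work, struct xmit_defer_work, work);

            /* ... transmit xw->skb from sleepable context ... */
            consume_skb(xw->skb);   /* drop the reference taken at queue time */
            kfree(xw);              /* the work item dies with its packet */
    }

    /* Called from the atomic xmit path. Because each packet gets a
     * fresh work item, a worker that is already draining can never
     * strand an skb the way a shared work item plus skb queue could.
     */
    static int defer_one_packet(struct kthread_worker *worker,
                                struct sk_buff *skb)
    {
            struct xmit_defer_work *xw = kzalloc(sizeof(*xw), GFP_ATOMIC);

            if (!xw)
                    return -ENOMEM;

            xw->skb = skb_get(skb); /* hold the packet until the worker is done */
            kthread_init_work(&xw->work, xmit_defer_work_fn);
            kthread_queue_work(worker, &xw->work);

            return 0;
    }

The key property is that every queued work item owns exactly one skb,
so no packet can be left behind by an already-queued work item.
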
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3d74295
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2675,10 +2675,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
         return NETDEV_TX_OK;
 }
 
-#define work_to_port(work) \
-        container_of((work), struct sja1105_port, xmit_work)
-#define tagger_to_sja1105(t) \
-        container_of((t), struct sja1105_private, tagger_data)
+#define work_to_xmit_work(w) \
+        container_of((w), struct sja1105_deferred_xmit_work, work)
 
 /* Deferred work is unfortunately necessary because setting up the management
  * route cannot be done from atomic context (SPI transfer takes a sleepable
@@ -2686,25 +2684,25 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
  */
 static void sja1105_port_deferred_xmit(struct kthread_work *work)
 {
-        struct sja1105_port *sp = work_to_port(work);
-        struct sja1105_tagger_data *tagger_data = sp->data;
-        struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
-        int port = sp - priv->ports;
-        struct sk_buff *skb;
+        struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+        struct sk_buff *clone, *skb = xmit_work->skb;
+        struct dsa_switch *ds = xmit_work->dp->ds;
+        struct sja1105_private *priv = ds->priv;
+        int port = xmit_work->dp->index;
 
-        while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
-                struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+        clone = SJA1105_SKB_CB(skb)->clone;
 
-                mutex_lock(&priv->mgmt_lock);
+        mutex_lock(&priv->mgmt_lock);
 
-                sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
+        sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
 
-                /* The clone, if there, was made by dsa_skb_tx_timestamp */
-                if (clone)
-                        sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
+        /* The clone, if there, was made by dsa_skb_tx_timestamp */
+        if (clone)
+                sja1105_ptp_txtstamp_skb(ds, port, clone);
 
-                mutex_unlock(&priv->mgmt_lock);
-        }
+        mutex_unlock(&priv->mgmt_lock);
+
+        kfree(xmit_work);
 }
 
 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
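
The new work_to_xmit_work() macro is plain container_of() arithmetic:
given a pointer to the embedded kthread_work, it subtracts the member
offset to recover the enclosing sja1105_deferred_xmit_work. A
self-contained userspace illustration of the same trick (struct wrapper
is hypothetical, not kernel code):

    #include <stddef.h>
    #include <stdio.h>

    /* Same idea as the kernel's container_of(): step back from a
     * member's address to the address of its enclosing structure.
     */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wrapper {
            int payload;
            int member;     /* stands in for the embedded kthread_work */
    };

    int main(void)
    {
            struct wrapper w = { .payload = 42, .member = 0 };
            int *mp = &w.member;    /* all that the callback receives */
            struct wrapper *back = container_of(mp, struct wrapper, member);

            printf("payload = %d\n", back->payload);    /* prints 42 */
            return 0;
    }
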
@@ -3009,54 +3007,43 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
 
 static void sja1105_teardown_ports(struct sja1105_private *priv)
 {
-        struct dsa_switch *ds = priv->ds;
-        int port;
-
-        for (port = 0; port < ds->num_ports; port++) {
-                struct sja1105_port *sp = &priv->ports[port];
-
-                if (sp->xmit_worker)
-                        kthread_destroy_worker(sp->xmit_worker);
-        }
+        struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
+
+        kthread_destroy_worker(tagger_data->xmit_worker);
 }
 
 static int sja1105_setup_ports(struct sja1105_private *priv)
 {
         struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
         struct dsa_switch *ds = priv->ds;
-        int port, rc;
+        struct kthread_worker *worker;
+        int port;
+
+        worker = kthread_create_worker(0, "dsa%d:%d_xmit", ds->dst->index,
+                                       ds->index);
+        if (IS_ERR(worker)) {
+                dev_err(ds->dev,
+                        "failed to create deferred xmit thread: %pe\n",
+                        worker);
+                return PTR_ERR(worker);
+        }
+
+        tagger_data->xmit_worker = worker;
+        tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
 
         /* Connections between dsa_port and sja1105_port */
         for (port = 0; port < ds->num_ports; port++) {
                 struct sja1105_port *sp = &priv->ports[port];
                 struct dsa_port *dp = dsa_to_port(ds, port);
-                struct kthread_worker *worker;
-                struct net_device *slave;
 
                 if (!dsa_port_is_user(dp))
                         continue;
 
                 dp->priv = sp;
                 sp->data = tagger_data;
-                slave = dp->slave;
-
-                kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
-
-                worker = kthread_create_worker(0, "%s_xmit", slave->name);
-                if (IS_ERR(worker)) {
-                        rc = PTR_ERR(worker);
-                        dev_err(ds->dev,
-                                "failed to create deferred xmit thread: %d\n",
-                                rc);
-                        goto out_destroy_workers;
-                }
-                sp->xmit_worker = worker;
-                skb_queue_head_init(&sp->xmit_queue);
         }
 
         return 0;
-
-out_destroy_workers:
-        sja1105_teardown_ports(priv);
-        return rc;
 }
 
 /* The programming model for the SJA1105 switch is "all-at-once" via static
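
The setup/teardown pair above is the standard kthread_worker lifecycle,
now performed once per switch rather than once per port. A minimal
sketch of that lifecycle, assuming a module-style init/exit context
(the thread name and hook names here are placeholders; the real code
derives the name from the DSA tree and switch indices):

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct kthread_worker *xmit_worker;

    static int xmit_worker_setup(void)
    {
            /* One kthread for the whole switch; the name is visible in ps */
            xmit_worker = kthread_create_worker(0, "example_xmit");
            if (IS_ERR(xmit_worker))
                    return PTR_ERR(xmit_worker);    /* what %pe prints */

            return 0;
    }

    static void xmit_worker_teardown(void)
    {
            /* Flushes pending work items, then stops and frees the kthread */
            kthread_destroy_worker(xmit_worker);
    }

A side effect of creating the worker before the port loop is that the
error path simplifies: there are no longer per-port workers to unwind,
so the out_destroy_workers label disappears.
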
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -37,6 +37,12 @@
 
 #define SJA1105_HWTS_RX_EN              0
 
+struct sja1105_deferred_xmit_work {
+        struct dsa_port *dp;
+        struct sk_buff *skb;
+        struct kthread_work work;
+};
+
 /* Global tagger data: each struct sja1105_port has a reference to
  * the structure defined in struct sja1105_private.
  */
@@ -52,6 +58,8 @@ struct sja1105_tagger_data {
          * 2-step TX timestamps
          */
         struct sk_buff_head skb_txtstamp_queue;
+        struct kthread_worker *xmit_worker;
+        void (*xmit_work_fn)(struct kthread_work *work);
 };
 
 struct sja1105_skb_cb {
@@ -65,9 +73,6 @@ struct sja1105_skb_cb {
         ((struct sja1105_skb_cb *)((skb)->cb))
 
 struct sja1105_port {
-        struct kthread_worker *xmit_worker;
-        struct kthread_work xmit_work;
-        struct sk_buff_head xmit_queue;
         struct sja1105_tagger_data *data;
         bool hwts_tx_en;
 };
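
Publishing xmit_work_fn through the shared tagger_data, rather than
having the tagger call the driver's function directly, presumably keeps
net/dsa/tag_sja1105.c free of symbol dependencies on the switch driver:
the tagger only ever sees the worker and function pointer that
sja1105_setup_ports() installed. Note also that struct kthread_work is
embedded in the new struct, which is what lets work_to_xmit_work()
recover the containing allocation from the pointer the kthread
infrastructure passes to the callback.
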
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -125,16 +125,29 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
 static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
                                           struct sk_buff *skb)
 {
+        void (*xmit_work_fn)(struct kthread_work *work);
+        struct sja1105_deferred_xmit_work *xmit_work;
         struct sja1105_port *sp = dp->priv;
+        struct kthread_worker *xmit_worker;
 
-        if (!dsa_port_is_sja1105(dp))
-                return skb;
+        xmit_work_fn = sp->data->xmit_work_fn;
+        xmit_worker = sp->data->xmit_worker;
+
+        if (!xmit_work_fn || !xmit_worker)
+                return NULL;
+
+        xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+        if (!xmit_work)
+                return NULL;
+
+        kthread_init_work(&xmit_work->work, xmit_work_fn);
         /* Increase refcount so the kfree_skb in dsa_slave_xmit
          * won't really free the packet.
          */
-        skb_queue_tail(&sp->xmit_queue, skb_get(skb));
-        kthread_queue_work(sp->xmit_worker, &sp->xmit_work);
+        xmit_work->dp = dp;
+        xmit_work->skb = skb_get(skb);
+
+        kthread_queue_work(xmit_worker, &xmit_work->work);
 
         return NULL;
 }
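
Two details in this last hunk are worth calling out. The work item is
allocated with GFP_ATOMIC because sja1105_defer_xmit() runs in the
transmit path, where sleeping is not allowed. And every failure path
returns NULL without calling skb_get(), so the kfree_skb() in
dsa_slave_xmit really does free the packet: under memory pressure the
packet is dropped, but it is never leaked or stranded. On success, the
extra reference taken by skb_get() is dropped later, once the worker
has handed the packet to the hardware.
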