Commit 6a06c2f7 authored by Vlad Buslov, committed by Saeed Mahameed

net/mlx5e: Refactor neigh used value update for concurrent execution

In order to remove the dependency on the rtnl lock and allow the neigh used
value update workqueue task to execute concurrently with tc, refactor
mlx5e_tc_update_neigh_used_value() for concurrent execution:

- Lock the encap table when accessing an encap entry to prevent concurrent
  changes.

- Save offloaded encap flows to a temporary list and release them after the
  encap entry is updated (see the sketch below). Add the
  mlx5e_put_encap_flow_list() helper, which is intended to be shared with the
  neigh update code in a following patch in this series. This is necessary
  because mlx5e_flow_put() cannot be called while holding encap_tbl_lock.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent ac0d9176
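
The locking scheme described in the commit message follows a common deferred-release pattern: take references while the table lock is held, collect them on a local list, and drop them only after the lock has been released, because the final put may itself need the lock. Below is a minimal kernel-style sketch of that pattern, not the driver code itself; struct obj, obj_get(), obj_put() and scan_objects() are hypothetical stand-ins for the flow object and its helpers.

/*
 * Sketch only: the "collect under lock, put outside the lock" pattern
 * from the commit message. obj, obj_get() and obj_put() are hypothetical
 * stand-ins for the flow object and its get/put helpers.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	struct list_head list;     /* membership in the shared table */
	struct list_head tmp_list; /* temporary membership in a local list */
	refcount_t refcnt;
};

static bool obj_get(struct obj *o)
{
	/* Take a reference only if the object is not already being freed. */
	return refcount_inc_not_zero(&o->refcnt);
}

static void obj_put(struct obj *o)
{
	/* Final release; assume the real put path also needs the table lock. */
	if (refcount_dec_and_test(&o->refcnt))
		kfree(o);
}

static void scan_objects(struct list_head *table, struct mutex *table_lock)
{
	struct obj *o, *tmp;
	LIST_HEAD(local);

	mutex_lock(table_lock);
	list_for_each_entry(o, table, list) {
		if (!obj_get(o))
			continue;
		list_add(&o->tmp_list, &local);
	}
	mutex_unlock(table_lock);

	/* Drop the references only after the lock is released, because the
	 * final put may need to take the same lock. The _safe variant is
	 * required since obj_put() can free the current entry.
	 */
	list_for_each_entry_safe(o, tmp, &local, tmp_list)
		obj_put(o);
}

The diff below applies the same structure: encap_tbl_lock plays the role of table_lock and the new tmp_list field links each flow onto the on-stack flow_list.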
@@ -126,6 +126,7 @@ struct mlx5e_tc_flow {
 	struct list_head hairpin; /* flows sharing the same hairpin */
 	struct list_head peer; /* flows with peer flow */
 	struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+	struct list_head tmp_list; /* temporary flow list used by neigh update */
 	refcount_t refcnt;
 	struct rcu_head rcu_head;
 	union {
@@ -1412,6 +1413,15 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
 		return flow->nic_attr->counter;
 }
 
+/* Iterate over tmp_list of flows attached to flow_list head. */
+static void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+{
+	struct mlx5e_tc_flow *flow, *tmp;
+
+	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
+		mlx5e_flow_put(priv, flow);
+}
+
 static struct mlx5e_encap_entry *
 mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
 			   struct mlx5e_encap_entry *e)
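
The helper above walks the list with list_for_each_entry_safe() because mlx5e_flow_put() can drop the last reference and free the flow, which invalidates the flow's list linkage. A minimal sketch of why the _safe variant is needed (struct item and drain() are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
};

static void drain(struct list_head *head)
{
	struct item *it, *tmp;

	/*
	 * list_for_each_entry() would read it->node.next after kfree(it),
	 * a use-after-free. list_for_each_entry_safe() caches the next
	 * entry in tmp before the loop body runs, so freeing 'it' is safe.
	 */
	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		kfree(it); /* stands in for the put that frees the flow */
	}
}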
@@ -1481,30 +1491,35 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 	 * next one.
 	 */
 	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
 		struct encap_flow_item *efi, *tmp;
+		struct mlx5_eswitch *esw;
+		LIST_HEAD(flow_list);
 
+		esw = priv->mdev->priv.eswitch;
+		mutex_lock(&esw->offloads.encap_tbl_lock);
 		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
 			flow = container_of(efi, struct mlx5e_tc_flow,
 					    encaps[efi->index]);
 			if (IS_ERR(mlx5e_flow_get(flow)))
 				continue;
+			list_add(&flow->tmp_list, &flow_list);
 
 			if (mlx5e_is_offloaded_flow(flow)) {
 				counter = mlx5e_tc_get_counter(flow);
 				lastuse = mlx5_fc_query_lastuse(counter);
 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
-					mlx5e_flow_put(netdev_priv(e->out_dev), flow);
 					neigh_used = true;
 					break;
 				}
 			}
-
-			mlx5e_flow_put(netdev_priv(e->out_dev), flow);
 		}
+		mutex_unlock(&esw->offloads.encap_tbl_lock);
 
+		mlx5e_put_encap_flow_list(priv, &flow_list);
 		if (neigh_used) {
 			/* release current encap before breaking the loop */
-			mlx5e_encap_put(netdev_priv(e->out_dev), e);
+			mlx5e_encap_put(priv, e);
			break;
 		}
 	}
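
The IS_ERR(mlx5e_flow_get(flow)) check above relies on a conditional-get idiom: a reference is taken only if the flow's refcount has not already dropped to zero, so flows that are concurrently being freed are simply skipped with continue. A hedged sketch of that idiom (struct foo and foo_get() are hypothetical, not the actual mlx5e_flow_get() body):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/refcount.h>

struct foo {
	refcount_t refcnt;
};

/* Conditional get: succeed only while the object is still alive. */
static struct foo *foo_get(struct foo *f)
{
	if (!f || !refcount_inc_not_zero(&f->refcnt))
		return ERR_PTR(-EINVAL);
	return f;
}

Together with taking encap_tbl_lock around the flow walk, this is what lets the neigh used value update run concurrently with tc, which is the stated goal of the patch.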