Commit 83033688 authored by Vlad Buslov, committed by Saeed Mahameed

net/mlx5: Change flow counters addlist type to single linked list

In order to prevent the flow counters stats work function from traversing the
whole flow counters tree while searching for deleted flow counters, a new list
to store deleted flow counters will be added to struct mlx5_fc_stats. However,
the flow counter structure itself has no space left in its first cache line to
store any more data. To free the space needed for the additional list node,
convert the current addlist doubly linked list (two pointers per node) to an
atomic singly linked list (one pointer per node).

The lockless NULL-terminated singly linked list data type doesn't require any
additional external synchronization for the operations used by the flow
counters module (adding a single new element, removing all elements from the
list, and traversing them). Remove the addlist_lock, which is no longer needed.
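
For reference, the lockless pattern relied on here is roughly the following.
This is a minimal sketch of the llist API usage under the same
add-one/detach-all/traverse-privately scheme, not the driver code itself; the
struct item, item_publish and items_consume names are illustrative only:

    #include <linux/llist.h>
    #include <linux/slab.h>

    /* Illustrative node type; mlx5 embeds llist_node in struct mlx5_fc. */
    struct item {
        struct llist_node node;
        int data;
    };

    static LLIST_HEAD(pending);    /* shared lockless list head */

    /* Producer context: add a single element; no lock is needed. */
    static void item_publish(struct item *it)
    {
        llist_add(&it->node, &pending);
    }

    /* Consumer context: atomically detach the whole list, then walk
     * it. The detached list is private, so traversal needs no
     * synchronization either.
     */
    static void items_consume(void)
    {
        struct llist_node *list = llist_del_all(&pending);
        struct item *it, *tmp;

        llist_for_each_entry_safe(it, tmp, list, node)
            kfree(it);    /* or insert into a private structure */
    }

llist_add() and llist_del_all() are safe against each other without any lock,
and the detached list belongs solely to the consumer, which is exactly why
addlist_lock can be dropped.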
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Amir Vadai <amir@vadai.me>
Reviewed-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 05dcc712
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -36,6 +36,7 @@
 #include <linux/refcount.h>
 #include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
+#include <linux/llist.h>
 
 enum fs_node_type {
 	FS_TYPE_NAMESPACE,
@@ -139,7 +140,7 @@ struct mlx5_fc_cache {
 
 struct mlx5_fc {
 	struct rb_node node;
-	struct list_head list;
+	struct llist_node addlist;
 
 	/* last{packets,bytes} members are used when calculating the delta since
 	 * last reading
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -52,7 +52,9 @@
  * access to counter list:
  * - create (user context)
  *   - mlx5_fc_create() only adds to an addlist to be used by
- *     mlx5_fc_stats_query_work(). addlist is protected by a spinlock.
+ *     mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ *     that doesn't require any additional synchronization when adding single
+ *     node.
  *   - spawn thread to do the actual destroy
  *
  * - destroy (user context)
@@ -156,28 +158,29 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
 	return node;
 }
 
+static void mlx5_free_fc(struct mlx5_core_dev *dev,
+			 struct mlx5_fc *counter)
+{
+	mlx5_cmd_fc_free(dev, counter->id);
+	kfree(counter);
+}
+
 static void mlx5_fc_stats_work(struct work_struct *work)
 {
 	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
 						 priv.fc_stats.work.work);
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
 	unsigned long now = jiffies;
 	struct mlx5_fc *counter = NULL;
 	struct mlx5_fc *last = NULL;
 	struct rb_node *node;
-	LIST_HEAD(tmplist);
 
-	spin_lock(&fc_stats->addlist_lock);
-
-	list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
-	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+	if (tmplist || !RB_EMPTY_ROOT(&fc_stats->counters))
 		queue_delayed_work(fc_stats->wq, &fc_stats->work,
 				   fc_stats->sampling_interval);
 
-	spin_unlock(&fc_stats->addlist_lock);
-
-	list_for_each_entry(counter, &tmplist, list)
+	llist_for_each_entry(counter, tmplist, addlist)
 		mlx5_fc_stats_insert(&fc_stats->counters, counter);
 
 	node = rb_first(&fc_stats->counters);
@@ -229,9 +232,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
 		counter->cache.lastuse = jiffies;
 		counter->aging = true;
 
-		spin_lock(&fc_stats->addlist_lock);
-		list_add(&counter->list, &fc_stats->addlist);
-		spin_unlock(&fc_stats->addlist_lock);
+		llist_add(&counter->addlist, &fc_stats->addlist);
 
 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
 	}
@@ -268,8 +269,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
 
 	fc_stats->counters = RB_ROOT;
-	INIT_LIST_HEAD(&fc_stats->addlist);
-	spin_lock_init(&fc_stats->addlist_lock);
+	init_llist_head(&fc_stats->addlist);
 
 	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
 	if (!fc_stats->wq)
@@ -284,6 +284,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct llist_node *tmplist;
 	struct mlx5_fc *counter;
 	struct mlx5_fc *tmp;
 	struct rb_node *node;
@@ -292,13 +293,9 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 	destroy_workqueue(dev->priv.fc_stats.wq);
 	dev->priv.fc_stats.wq = NULL;
 
-	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
-		list_del(&counter->list);
-
-		mlx5_cmd_fc_free(dev, counter->id);
-
-		kfree(counter);
-	}
+	tmplist = llist_del_all(&fc_stats->addlist);
+	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
+		mlx5_free_fc(dev, counter);
 
 	node = rb_first(&fc_stats->counters);
 	while (node) {
@@ -308,9 +305,7 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 		node = rb_next(node);
 
 		rb_erase(&counter->node, &fc_stats->counters);
 
-		mlx5_cmd_fc_free(dev, counter->id);
-
-		kfree(counter);
+		mlx5_free_fc(dev, counter);
 	}
 }
include/linux/mlx5/driver.h
@@ -584,9 +584,7 @@ struct mlx5_irq_info {
 
 struct mlx5_fc_stats {
 	struct rb_root counters;
-	struct list_head addlist;
-	/* protect addlist add/splice operations */
-	spinlock_t addlist_lock;
+	struct llist_head addlist;
 
 	struct workqueue_struct *wq;
 	struct delayed_work work;