Commit fd785e52 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Allocate icm_chunks from their own slab allocator

SW steering allocates/frees lots of icm_chunk structs. To make this more
efficient, create a separate kmem_cache and allocate these chunks from
this allocator.
By doing this we observe that the alloc/free "hiccups" frequency has
become much lower, which allows for a steadier rule insertion rate.
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 17b56073
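
For context, the patch follows the standard slab API lifecycle: create a dedicated kmem_cache for a frequently recycled struct, allocate and free objects through it, and destroy the cache on teardown. Below is a minimal standalone module sketch of that lifecycle; the struct my_chunk, the cache name "example_chunks", and the module boilerplate are illustrative stand-ins, not code from this patch.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Illustrative struct standing in for mlx5dr_icm_chunk. */
    struct my_chunk {
    	u64 offset;
    	u32 num_of_entries;
    };

    static struct kmem_cache *chunks_cache;

    static int __init example_init(void)
    {
    	struct my_chunk *chunk;

    	/* One cache for all my_chunk allocations: freed objects are
    	 * recycled within the cache instead of round-tripping the
    	 * page allocator, which smooths out alloc/free latency spikes.
    	 */
    	chunks_cache = kmem_cache_create("example_chunks",
    					 sizeof(struct my_chunk), 0,
    					 SLAB_HWCACHE_ALIGN, NULL);
    	if (!chunks_cache)
    		return -ENOMEM;

    	/* kmem_cache_zalloc() returns zeroed memory; plain
    	 * kmem_cache_alloc(), as used in the patch, does not, so
    	 * the caller must initialize every field itself.
    	 */
    	chunk = kmem_cache_zalloc(chunks_cache, GFP_KERNEL);
    	if (!chunk) {
    		kmem_cache_destroy(chunks_cache);
    		return -ENOMEM;
    	}

    	kmem_cache_free(chunks_cache, chunk);
    	return 0;
    }

    static void __exit example_exit(void)
    {
    	/* All objects must be freed before the cache is destroyed. */
    	kmem_cache_destroy(chunks_cache);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
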
@@ -60,10 +60,19 @@ static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
 {
 	int ret;
 
+	dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
+						   sizeof(struct mlx5dr_icm_chunk), 0,
+						   SLAB_HWCACHE_ALIGN, NULL);
+	if (!dmn->chunks_kmem_cache) {
+		mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
+		return -ENOMEM;
+	}
+
 	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
 	if (!dmn->ste_icm_pool) {
 		mlx5dr_err(dmn, "Couldn't get icm memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_chunks_kmem_cache;
 	}
 
 	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
@@ -85,6 +94,9 @@ static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
 	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
 free_ste_icm_pool:
 	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+free_chunks_kmem_cache:
+	kmem_cache_destroy(dmn->chunks_kmem_cache);
+
 	return ret;
 }
@@ -93,6 +105,7 @@ static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
 	mlx5dr_send_info_pool_destroy(dmn);
 	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
 	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+	kmem_cache_destroy(dmn->chunks_kmem_cache);
 }
 
 static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
@@ -9,6 +9,8 @@ struct mlx5dr_icm_pool {
 	enum mlx5dr_icm_type icm_type;
 	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
 	struct mlx5dr_domain *dmn;
+	struct kmem_cache *chunks_kmem_cache;
+
 	/* memory management */
 	struct mutex mutex; /* protect the ICM pool and ICM buddy */
 	struct list_head buddy_mem_list;
@@ -193,10 +195,13 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
 
 static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
 {
+	struct kmem_cache *chunks_cache =
+		chunk->buddy_mem->pool->chunks_kmem_cache;
+
 	chunk->buddy_mem->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
 	list_del(&chunk->chunk_list);
-	kvfree(chunk);
+	kmem_cache_free(chunks_cache, chunk);
 }
 
 static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
@@ -302,10 +307,11 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
 		    struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
 		    unsigned int seg)
 {
+	struct kmem_cache *chunks_cache = buddy_mem_pool->pool->chunks_kmem_cache;
 	struct mlx5dr_icm_chunk *chunk;
 	int offset;
 
-	chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
+	chunk = kmem_cache_alloc(chunks_cache, GFP_KERNEL);
 	if (!chunk)
 		return NULL;
@@ -482,6 +488,7 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
 	pool->dmn = dmn;
 	pool->icm_type = icm_type;
 	pool->max_log_chunk_sz = max_log_chunk_sz;
+	pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
 
 	INIT_LIST_HEAD(&pool->buddy_mem_list);
@@ -915,6 +915,7 @@ struct mlx5dr_domain {
 	struct mlx5dr_icm_pool *action_icm_pool;
 	struct mlx5dr_send_info_pool *send_info_pool_rx;
 	struct mlx5dr_send_info_pool *send_info_pool_tx;
+	struct kmem_cache *chunks_kmem_cache;
 	struct mlx5dr_send_ring *send_ring;
 	struct mlx5dr_domain_info info;
 	struct xarray csum_fts_xa;