Commit edaea001 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Remove the buddy used_list

No need to have the used_list - we don't need to keep track of the
used chunks, we only need to know the amount of used memory.
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 4519fc45
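
For illustration only (not part of the commit): a minimal user-space C sketch of the accounting idea. The names below (buddy_mem, chunk, used_memory) echo the driver's structures but are hypothetical; the point is that a per-buddy byte counter, updated on allocation and free, is enough for usage tracking, so chunks no longer need to be linked into a used_list.

/* Illustrative sketch of counter-based accounting, not mlx5 driver code. */
#include <stdio.h>
#include <stdlib.h>

struct buddy_mem {
        /* Before: a list of used chunks. After: only the used byte count. */
        unsigned long long used_memory;
};

struct chunk {
        struct buddy_mem *buddy;
        size_t size;
};

static struct chunk *chunk_alloc(struct buddy_mem *buddy, size_t size)
{
        struct chunk *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        c->buddy = buddy;
        c->size = size;
        buddy->used_memory += size;     /* account; no list insertion needed */
        return c;
}

static void chunk_free(struct chunk *c)
{
        c->buddy->used_memory -= c->size;       /* account; no list_del needed */
        free(c);
}

int main(void)
{
        struct buddy_mem buddy = { .used_memory = 0 };
        struct chunk *a = chunk_alloc(&buddy, 4096);
        struct chunk *b = chunk_alloc(&buddy, 8192);

        if (!a || !b)
                return 1;
        printf("used: %llu bytes\n", buddy.used_memory);        /* 12288 */
        chunk_free(a);
        chunk_free(b);
        printf("used: %llu bytes\n", buddy.used_memory);        /* 0 */
        return 0;
}

In the driver, the increment side of this counter now lives in dr_icm_chunk_init(), as the diff below shows.
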
@@ -15,7 +15,6 @@ int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
 	buddy->max_order = max_order;
 
 	INIT_LIST_HEAD(&buddy->list_node);
-	INIT_LIST_HEAD(&buddy->used_list);
 
 	buddy->bitmap = kcalloc(buddy->max_order + 1,
 			       sizeof(*buddy->bitmap),

@@ -207,17 +207,6 @@ static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
 	       num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
-static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
-{
-	struct kmem_cache *chunks_cache =
-		chunk->buddy_mem->pool->chunks_kmem_cache;
-
-	chunk->buddy_mem->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
-	list_del(&chunk->chunk_list);
-	kmem_cache_free(chunks_cache, chunk);
-}
-
 static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
 {
 	int num_of_entries =

@@ -297,11 +286,6 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
 
 static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
 {
-	struct mlx5dr_icm_chunk *chunk, *next;
-
-	list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
-		dr_icm_chunk_destroy(chunk);
-
 	dr_icm_pool_mr_destroy(buddy->icm_mr);
 
 	mlx5dr_buddy_cleanup(buddy);

@@ -312,36 +296,25 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
 	kvfree(buddy);
 }
 
-static struct mlx5dr_icm_chunk *
-dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
-		    enum mlx5dr_icm_chunk_size chunk_size,
-		    struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
-		    unsigned int seg)
+static void
+dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
+		  struct mlx5dr_icm_pool *pool,
+		  enum mlx5dr_icm_chunk_size chunk_size,
+		  struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+		  unsigned int seg)
 {
-	struct kmem_cache *chunks_cache = buddy_mem_pool->pool->chunks_kmem_cache;
-	struct mlx5dr_icm_chunk *chunk;
 	int offset;
 
-	chunk = kmem_cache_alloc(chunks_cache, GFP_KERNEL);
-	if (!chunk)
-		return NULL;
-
-	offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
-
 	chunk->seg = seg;
 	chunk->size = chunk_size;
 	chunk->buddy_mem = buddy_mem_pool;
 
-	if (pool->icm_type == DR_ICM_TYPE_STE)
+	if (pool->icm_type == DR_ICM_TYPE_STE) {
+		offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
 		dr_icm_chunk_ste_init(chunk, offset);
+	}
 
 	buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
-	INIT_LIST_HEAD(&chunk->chunk_list);
-
-	/* chunk now is part of the used_list */
-	list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
-
-	return chunk;
 }
 
 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)

@@ -463,10 +436,12 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
 	if (ret)
 		goto out;
 
-	chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
+	chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
 	if (!chunk)
 		goto out_err;
 
+	dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);
+
 	goto out;
 
 out_err:

@@ -495,7 +470,6 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
 	hot_chunk->seg = chunk->seg;
 	hot_chunk->size = chunk->size;
 
-	list_del(&chunk->chunk_list);
 	kmem_cache_free(chunks_cache, chunk);
 
 	/* Check if we have chunks that are waiting for sync-ste */

@@ -1111,7 +1111,6 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
 
 struct mlx5dr_icm_chunk {
 	struct mlx5dr_icm_buddy_mem *buddy_mem;
-	struct list_head chunk_list;
 
 	/* indicates the index of this chunk in the whole memory,
 	 * used for deleting the chunk from the buddy

@@ -164,8 +164,7 @@ struct mlx5dr_icm_buddy_mem {
 	struct mlx5dr_icm_mr *icm_mr;
 	struct mlx5dr_icm_pool *pool;
 
-	/* This is the list of used chunks. HW may be accessing this memory */
-	struct list_head used_list;
+	/* Amount of memory in used chunks - HW may be accessing this memory */
 	u64 used_memory;
 
 	/* Memory optimisation */