Commit a2a88b8e authored by Aharon Landau, committed by Jason Gunthorpe

RDMA/mlx5: Don't keep umrable 'page_shift' in cache entries

mkc.log_page_size can be changed using UMR. Therefore, don't treat it as a
cache entry property.

Remove it from struct mlx5_cache_ent.

All cache mkeys will be created with the default PAGE_SHIFT, and updated
with the needed page_shift using UMR when they are passed to a user.

Link: https://lore.kernel.org/r/20230125222807.6921-2-michaelgur@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 592627cc
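
A minimal userspace C sketch of the resulting flow (the cached_mkey type and
the umr_update_page_shift() helper are hypothetical stand-ins for
illustration, not the mlx5 driver's API): every cached mkey is created with
the default PAGE_SHIFT, and the page shift is rewritten through UMR (a work
request that modifies an existing mkey's properties) only when the mkey is
handed to a user.

#include <stdio.h>

#define PAGE_SHIFT 12	/* typical 4K-page value; an assumption for this sketch */

/* Hypothetical stand-in for an mkey as the cache sees it. */
struct cached_mkey {
	unsigned int key;
	unsigned int log_page_size;
};

/* Cache-fill path: every cached mkey starts at the default PAGE_SHIFT. */
static void init_cached_mkey(struct cached_mkey *mkey, unsigned int key)
{
	mkey->key = key;
	mkey->log_page_size = PAGE_SHIFT;
}

/*
 * Hypothetical stand-in for posting a UMR work request that rewrites
 * mkc.log_page_size on the device.
 */
static int umr_update_page_shift(struct cached_mkey *mkey,
				 unsigned int page_shift)
{
	mkey->log_page_size = page_shift;
	return 0;
}

/* Hand-out path: fix up the page shift only when the MR needs a different one. */
static int hand_out_mkey(struct cached_mkey *mkey, unsigned int page_shift)
{
	if (mkey->log_page_size != page_shift)
		return umr_update_page_shift(mkey, page_shift);
	return 0;
}

int main(void)
{
	struct cached_mkey mkey;

	init_cached_mkey(&mkey, 0x1234);
	hand_out_mkey(&mkey, 21);	/* e.g. an MR laid out in 2 MiB pages */
	printf("mkey 0x%x log_page_size=%u\n", mkey.key, mkey.log_page_size);
	return 0;
}
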
@@ -739,7 +739,6 @@ struct mlx5_cache_ent {
 	char name[4];
 	u32 order;
 	u32 access_mode;
-	u32 page;
 	unsigned int ndescs;
 	u8 disabled:1;
@@ -297,7 +297,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
 	MLX5_SET(mkc, mkc, translations_octword_size,
 		 get_mkc_octo_size(ent->access_mode, ent->ndescs));
-	MLX5_SET(mkc, mkc, log_page_size, ent->page);
+	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
 }

 /* Asynchronously schedule new MRs to be populated in the cache. */
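
MLX5_SET() fills one field of a PRM-defined structure such as the mkey
context (mkc); the real macro derives each field's bit offset and width from
the mlx5_ifc layout description. A simplified sketch of that style of setter,
with the offset and width hard-coded for a single illustrative field (the two
constants are assumptions for the sketch, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative field geometry; the real values come from mlx5_ifc. */
#define LOG_PAGE_SIZE_SHIFT	0
#define LOG_PAGE_SIZE_MASK	0x3fu	/* assume a 6-bit field */

/* Clear the field, then OR in the new value, as a PRM-style setter does. */
static void set_log_page_size(uint32_t *mkc_word, unsigned int page_shift)
{
	*mkc_word &= ~(LOG_PAGE_SIZE_MASK << LOG_PAGE_SIZE_SHIFT);
	*mkc_word |= (page_shift & LOG_PAGE_SIZE_MASK) << LOG_PAGE_SIZE_SHIFT;
}

int main(void)
{
	uint32_t mkc_word = 0;

	set_log_page_size(&mkc_word, 12);	/* PAGE_SHIFT on most architectures */
	printf("mkc word: 0x%08x\n", mkc_word);
	return 0;
}

With ent->page removed, the cache no longer tracks the eventual page size at
all; PAGE_SHIFT is only a valid placeholder here until UMR rewrites the field
for the user's MR.
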
@@ -765,7 +765,6 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 		if (ent->order > mkey_cache_max_order(dev))
 			continue;
-		ent->page = PAGE_SHIFT;
 		ent->ndescs = 1 << ent->order;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
 		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
@@ -1594,14 +1594,12 @@ void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
 	switch (ent->order - 2) {
 	case MLX5_IMR_MTT_CACHE_ENTRY:
-		ent->page = PAGE_SHIFT;
 		ent->ndescs = MLX5_IMR_MTT_ENTRIES;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
 		ent->limit = 0;
 		break;
 	case MLX5_IMR_KSM_CACHE_ENTRY:
-		ent->page = MLX5_KSM_PAGE_SHIFT;
 		ent->ndescs = mlx5_imr_ksm_entries;
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
 		ent->limit = 0;