Commit c3bdbaea authored by Maher Sanalla, committed by Saeed Mahameed

net/mlx5: Store page counters in a single array

Currently, an independent page counter is used for tracking memory usage
for each function type such as VF, PF and host PF (DPU).

For better code readability, use a single array that stores
the number of allocated memory pages for each function type.
Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 8aa5f171
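For illustration only (not part of the patch): a minimal, self-contained userspace sketch of the counter layout this change introduces, with one array indexed by function type in place of the separate VF and host-PF counters. The mapping follows func_id_to_type() from the diff below; the is_ecpf parameter here is a stand-in for mlx5_core_is_ecpf(dev).

#include <stdbool.h>
#include <stdio.h>

enum mlx5_func_type {
	MLX5_PF,
	MLX5_VF,
	MLX5_HOST_PF,
	MLX5_FUNC_TYPE_NUM,
};

/* One counter per function type, as in dev->priv.page_counters[]. */
static unsigned int page_counters[MLX5_FUNC_TYPE_NUM];

/* Same mapping as the patch: func_id 0 is the local PF, or the external
 * host PF when running on an embedded CPU PF (DPU) and the request is not
 * for the ECPF itself; any non-zero func_id belongs to a VF.
 */
static enum mlx5_func_type func_id_to_type(bool is_ecpf, unsigned int func_id,
					   bool ec_function)
{
	if (!func_id)
		return is_ecpf && !ec_function ? MLX5_HOST_PF : MLX5_PF;

	return MLX5_VF;
}

int main(void)
{
	/* 16 pages given on behalf of VF func_id 5 on a regular PF. */
	page_counters[func_id_to_type(false, 5, false)] += 16;
	/* 8 pages given to the external host PF, as seen from a DPU (ECPF). */
	page_counters[func_id_to_type(true, 0, false)] += 8;

	printf("PF %u, VF %u, host PF %u\n",
	       page_counters[MLX5_PF], page_counters[MLX5_VF],
	       page_counters[MLX5_HOST_PF]);
	return 0;
}

The debugfs entries in the patch keep their existing names (fw_pages_vfs, fw_pages_host_pf), so only the in-kernel storage changes, not what userspace sees.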
@@ -245,8 +245,8 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
 	pages = dev->priv.dbg.pages_debugfs;
 
 	debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
-	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
-	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
 	debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
 	debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
 	debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
...
@@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 	mlx5_host_pf_cleanup(dev);
 
-	err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+	err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
 	if (err)
 		mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
 }
@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
 	return (u32)func_id | (ec_function << 16);
 }
 
+static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
+{
+	if (!func_id)
+		return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
+
+	return MLX5_VF;
+}
+
 static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
 	struct rb_root *root;
@@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
 	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
 	int notify_fail = event;
+	u16 func_type;
 	u64 addr;
 	int err;
 	u32 *in;
@@ -383,11 +392,9 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 		goto out_dropped;
 	}
 
+	func_type = func_id_to_type(dev, func_id, ec_function);
+	dev->priv.page_counters[func_type] += npages;
 	dev->priv.fw_pages += npages;
-	if (func_id)
-		dev->priv.vfs_pages += npages;
-	else if (mlx5_core_is_ecpf(dev) && !ec_function)
-		dev->priv.host_pf_pages += npages;
 
 	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
 		      npages, ec_function, func_id, err);
@@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
 	struct rb_root *root;
 	struct rb_node *p;
 	int npages = 0;
+	u16 func_type;
 
 	root = xa_load(&dev->priv.page_root_xa, function);
 	if (WARN_ON_ONCE(!root))
@@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
 		free_fwp(dev, fwp, fwp->free_count);
 	}
 
+	func_type = func_id_to_type(dev, func_id, ec_function);
+	dev->priv.page_counters[func_type] -= npages;
 	dev->priv.fw_pages -= npages;
-	if (func_id)
-		dev->priv.vfs_pages -= npages;
-	else if (mlx5_core_is_ecpf(dev) && !ec_function)
-		dev->priv.host_pf_pages -= npages;
 
 	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
 		      npages, ec_function, func_id);
@@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
 	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int num_claimed;
+	u16 func_type;
 	u32 *out;
 	int err;
 	int i;
@@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	if (nclaimed)
 		*nclaimed = num_claimed;
 
+	func_type = func_id_to_type(dev, func_id, ec_function);
+	dev->priv.page_counters[func_type] -= num_claimed;
 	dev->priv.fw_pages -= num_claimed;
-	if (func_id)
-		dev->priv.vfs_pages -= num_claimed;
-	else if (mlx5_core_is_ecpf(dev) && !ec_function)
-		dev->priv.host_pf_pages -= num_claimed;
 
 out_free:
 	kvfree(out);
@@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 	WARN(dev->priv.fw_pages,
 	     "FW pages counter is %d after reclaiming all pages\n",
 	     dev->priv.fw_pages);
-	WARN(dev->priv.vfs_pages,
+	WARN(dev->priv.page_counters[MLX5_VF],
 	     "VFs FW pages counter is %d after reclaiming all pages\n",
-	     dev->priv.vfs_pages);
-	WARN(dev->priv.host_pf_pages,
+	     dev->priv.page_counters[MLX5_VF]);
+	WARN(dev->priv.page_counters[MLX5_HOST_PF],
 	     "External host PF FW pages counter is %d after reclaiming all pages\n",
-	     dev->priv.host_pf_pages);
+	     dev->priv.page_counters[MLX5_HOST_PF]);
 
 	return 0;
 }
...
@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
 	mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
 
-	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
+	if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
 }
...
@@ -573,6 +573,13 @@ struct mlx5_debugfs_entries {
 	struct dentry *lag_debugfs;
 };
 
+enum mlx5_func_type {
+	MLX5_PF,
+	MLX5_VF,
+	MLX5_HOST_PF,
+	MLX5_FUNC_TYPE_NUM,
+};
+
 struct mlx5_ft_pool;
 struct mlx5_priv {
 	/* IRQ table valid only for real pci devices PF or VF */
@@ -583,11 +590,10 @@ struct mlx5_priv {
 	struct mlx5_nb pg_nb;
 	struct workqueue_struct *pg_wq;
 	struct xarray page_root_xa;
-	u32 fw_pages;
 	atomic_t reg_pages;
 	struct list_head free_list;
-	u32 vfs_pages;
-	u32 host_pf_pages;
+	u32 fw_pages;
+	u32 page_counters[MLX5_FUNC_TYPE_NUM];
 	u32 fw_pages_alloc_failed;
 	u32 give_pages_dropped;
 	u32 reclaim_pages_discard;
...