Commit bfd745f8 authored by Leon Romanovsky

RDMA/mlx5: Delete Q counter allocations command

Remove the mlx5_ib implementation of the Q counter allocation command,
together with the boolean that tracked whether the counter was valid.
The flag is not needed, because counter_id == 0 already means that the
counter is not valid.
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
parent 66247fbb
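For orientation only, here is a minimal sketch (not part of this commit) of the open-coded ALLOC_Q_COUNTER pattern that replaces the deleted mlx5_cmd_alloc_q_counter() helper. The wrapper name example_alloc_q_counter() and its uid/counter_id parameters are hypothetical; MLX5_SET, MLX5_GET and mlx5_cmd_exec_inout() are used the same way as in the hunks below.

/*
 * Illustrative sketch only (not part of the patch): allocate a Q counter
 * by building the command inline, as the converted call sites now do.
 */
static int example_alloc_q_counter(struct mlx5_core_dev *mdev, u16 uid,
				   u16 *counter_id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
	int err;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	MLX5_SET(alloc_q_counter_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
	if (err)
		return err;

	/* A set_id of 0 now doubles as "no valid counter allocated". */
	*counter_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	return 0;
}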
@@ -327,23 +327,6 @@ int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
-			     u16 uid)
-{
-	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
-	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
-	int err;
-
-	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
-	MLX5_SET(alloc_q_counter_in, in, uid, uid);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*counter_id = MLX5_GET(alloc_q_counter_out, out,
-				       counter_set_id);
-	return err;
-}
-
 int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
 		     u16 opmod, u8 port)
 {
...
@@ -61,8 +61,6 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
 			u32 qpn, u16 uid);
 int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
 int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
-int mlx5_cmd_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id,
-			     u16 uid);
 int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
 		     u16 opmod, u8 port);
 #endif /* MLX5_IB_CMD_H */
@@ -5449,7 +5449,7 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
 		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
 
 	for (i = 0; i < num_cnt_ports; i++) {
-		if (dev->port[i].cnts.set_id_valid) {
+		if (dev->port[i].cnts.set_id) {
 			MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
 				 dev->port[i].cnts.set_id);
 			mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
@@ -5562,11 +5562,14 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
 
 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
 {
+	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
 	int num_cnt_ports;
 	int err = 0;
 	int i;
 	bool is_shared;
 
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
 	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
 	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
@@ -5578,17 +5581,19 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
 		mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
 				      dev->port[i].cnts.offsets);
 
-		err = mlx5_cmd_alloc_q_counter(dev->mdev,
-					       &dev->port[i].cnts.set_id,
-					       is_shared ?
-					       MLX5_SHARED_RESOURCE_UID : 0);
+		MLX5_SET(alloc_q_counter_in, in, uid,
+			 is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
+
+		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
 		if (err) {
 			mlx5_ib_warn(dev,
 				     "couldn't allocate queue counter for port %d, err %d\n",
 				     i + 1, err);
 			goto err_alloc;
 		}
-		dev->port[i].cnts.set_id_valid = true;
+
+		dev->port[i].cnts.set_id =
+			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 	}
 
 	return 0;
@@ -5785,16 +5790,20 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
 			   struct ib_qp *qp)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
-	u16 cnt_set_id = 0;
 	int err;
 
 	if (!counter->id) {
-		err = mlx5_cmd_alloc_q_counter(dev->mdev,
-					       &cnt_set_id,
-					       MLX5_SHARED_RESOURCE_UID);
+		u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+		u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+
+		MLX5_SET(alloc_q_counter_in, in, opcode,
+			 MLX5_CMD_OP_ALLOC_Q_COUNTER);
+		MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
 		if (err)
 			return err;
-		counter->id = cnt_set_id;
+		counter->id =
+			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 	}
 
 	err = mlx5_ib_qp_set_counter(qp, counter);
...
@@ -780,7 +780,6 @@ struct mlx5_ib_counters {
 	u32 num_cong_counters;
 	u32 num_ext_ppcnt_counters;
 	u16 set_id;
-	bool set_id_valid;
 };
 
 struct mlx5_ib_multiport_info;
...