Commit 2dc4d672 authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Change GSI QP to have same creation flow like other QPs

There is no reason to have a separate create flow for the GSI QP, while the
general create_qp routine has all the needed checks and the ability to allocate
and free the proper struct mlx5_ib_qp.

Link: https://lore.kernel.org/r/20200926102450.2966017-4-leon@kernel.org
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f8225e34
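
In short: mlx5_ib_gsi_create_qp() used to allocate its own struct mlx5_ib_qp and return an ib_qp pointer (or an ERR_PTR), while every other QP type was allocated and freed by the common create_qp() path. After this change the common path owns the allocation for the GSI QP as well, dispatches on the QP type, and the renamed mlx5_ib_create_gsi() only fills in the embedded GSI state and returns 0 or a negative errno. The stand-alone sketch below models that pattern with simplified, made-up types and helpers (qp, create_gsi, create_regular are illustrative only, not the driver's real structures):

/* Simplified model of the refactor: one allocation in the common path,
 * per-type helpers fill a caller-provided struct and return 0 / -errno.
 * All names here are illustrative, not the mlx5 driver's own. */
#include <stdio.h>
#include <stdlib.h>

enum qp_type { QPT_REGULAR, QPT_GSI };

struct qp {
        enum qp_type type;
        int qp_num;
        /* GSI-specific state is embedded, not separately allocated */
        struct { int num_tx_qps; } gsi;
};

/* New style: the helper receives the caller-allocated struct and
 * reports success or failure as an int, instead of allocating and
 * returning a pointer itself. */
static int create_gsi(struct qp *qp)
{
        qp->qp_num = 1;          /* GSI is always QP1 */
        qp->gsi.num_tx_qps = 2;  /* placeholder for per-port TX QPs */
        return 0;
}

static int create_regular(struct qp *qp)
{
        qp->qp_num = 42;         /* placeholder number */
        return 0;
}

static struct qp *create_qp(enum qp_type type)
{
        struct qp *qp = calloc(1, sizeof(*qp));
        int err;

        if (!qp)
                return NULL;
        qp->type = type;

        /* Single dispatch point, mirroring the switch added in create_qp() */
        switch (type) {
        case QPT_GSI:
                err = create_gsi(qp);
                break;
        default:
                err = create_regular(qp);
        }

        if (err) {
                free(qp);        /* common path owns allocation and cleanup */
                return NULL;
        }
        return qp;
}

int main(void)
{
        struct qp *qp = create_qp(QPT_GSI);

        if (!qp)
                return 1;
        printf("created qp_num %d\n", qp->qp_num);
        free(qp);
        return 0;
}

Keeping allocation and error unwinding in one place is what lets the GSI-specific error labels (err_free and the kfree of mqp) disappear from the GSI code in the diff below.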
@@ -89,14 +89,13 @@ static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
 	spin_unlock_irqrestore(&gsi->lock, flags);
 }
 
-struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
-				    struct ib_qp_init_attr *init_attr)
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+		       struct ib_qp_init_attr *attr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_ib_qp *mqp;
 	struct mlx5_ib_gsi_qp *gsi;
-	struct ib_qp_init_attr hw_init_attr = *init_attr;
-	const u8 port_num = init_attr->port_num;
+	struct ib_qp_init_attr hw_init_attr = *attr;
+	const u8 port_num = attr->port_num;
 	int num_qps = 0;
 	int ret;
 
@@ -108,27 +107,19 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 		num_qps = MLX5_MAX_PORTS;
 	}
 
-	mqp = kzalloc(sizeof(struct mlx5_ib_qp), GFP_KERNEL);
-	if (!mqp)
-		return ERR_PTR(-ENOMEM);
-
 	gsi = &mqp->gsi;
 	gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
-	if (!gsi->tx_qps) {
-		ret = -ENOMEM;
-		goto err_free;
-	}
+	if (!gsi->tx_qps)
+		return -ENOMEM;
 
-	gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
-				       sizeof(*gsi->outstanding_wrs),
-				       GFP_KERNEL);
+	gsi->outstanding_wrs =
+		kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs),
+			GFP_KERNEL);
 	if (!gsi->outstanding_wrs) {
 		ret = -ENOMEM;
 		goto err_free_tx;
 	}
 
-	mutex_init(&mqp->mutex);
-
 	mutex_lock(&dev->devr.mutex);
 
 	if (dev->devr.ports[port_num - 1].gsi) {
@@ -140,12 +131,11 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 	gsi->num_qps = num_qps;
 	spin_lock_init(&gsi->lock);
-	gsi->cap = init_attr->cap;
-	gsi->sq_sig_type = init_attr->sq_sig_type;
-	mqp->ibqp.qp_num = 1;
+	gsi->cap = attr->cap;
+	gsi->sq_sig_type = attr->sq_sig_type;
 	gsi->port_num = port_num;
 
-	gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+	gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
 			      IB_POLL_SOFTIRQ);
 	if (IS_ERR(gsi->cq)) {
 		mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
@@ -181,11 +171,11 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 	INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
 	INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);
 
-	dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
+	dev->devr.ports[attr->port_num - 1].gsi = gsi;
 
 	mutex_unlock(&dev->devr.mutex);
 
-	return &mqp->ibqp;
+	return 0;
 
 err_destroy_cq:
 	ib_free_cq(gsi->cq);
@@ -194,9 +184,7 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 	kfree(gsi->outstanding_wrs);
 err_free_tx:
 	kfree(gsi->tx_qps);
-err_free:
-	kfree(mqp);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
...
@@ -1335,8 +1335,8 @@ void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
 void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
 
 /* GSI QP helper functions */
-struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
-				    struct ib_qp_init_attr *init_attr);
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+		       struct ib_qp_init_attr *attr);
 int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
 int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
 			  int attr_mask);
@@ -1375,7 +1375,7 @@ static inline void init_query_mad(struct ib_smp *mad)
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
-	return qp_type == MLX5_IB_QPT_HW_GSI;
+	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
 }
 
 #define MLX5_MAX_UMR_SHIFT 16
...
@@ -2785,21 +2785,23 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		goto out;
 	}
 
-	if (qp->type == MLX5_IB_QPT_DCT) {
+	switch (qp->type) {
+	case MLX5_IB_QPT_DCT:
 		err = create_dct(dev, pd, qp, params);
-		goto out;
-	}
-
-	if (qp->type == IB_QPT_XRC_TGT) {
+		break;
+	case IB_QPT_XRC_TGT:
 		err = create_xrc_tgt_qp(dev, qp, params);
-		goto out;
+		break;
+	case IB_QPT_GSI:
+		err = mlx5_ib_create_gsi(pd, qp, params->attr);
+		break;
+	default:
+		if (params->udata)
+			err = create_user_qp(dev, pd, qp, params);
+		else
+			err = create_kernel_qp(dev, pd, qp, params);
 	}
 
-	if (params->udata)
-		err = create_user_qp(dev, pd, qp, params);
-	else
-		err = create_kernel_qp(dev, pd, qp, params);
-
 out:
 	if (err) {
 		mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
@@ -2939,9 +2941,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	if (err)
 		return ERR_PTR(err);
 
-	if (attr->qp_type == IB_QPT_GSI)
-		return mlx5_ib_gsi_create_qp(pd, attr);
-
 	params.udata = udata;
 	params.uidx = MLX5_IB_DEFAULT_UIDX;
 	params.attr = attr;
@@ -3010,9 +3009,14 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 	return &qp->ibqp;
 
 destroy_qp:
-	if (qp->type == MLX5_IB_QPT_DCT) {
+	switch (qp->type) {
+	case MLX5_IB_QPT_DCT:
 		mlx5_ib_destroy_dct(qp);
-	} else {
+		break;
+	case IB_QPT_GSI:
+		mlx5_ib_destroy_gsi(qp);
+		break;
+	default:
 		/*
 		 * These lines below are temp solution till QP allocation
 		 * will be moved to be under IB/core responsiblity.
...