Commit a49b1dc7 authored by Leon Romanovsky, committed by Doug Ledford

RDMA: Convert destroy_wq to be void

All destroy_wq callbacks always succeed and there is no need to check
their return value, so convert destroy_wq to be void.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 8d18ad83
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2235,19 +2235,17 @@ EXPORT_SYMBOL(ib_create_wq);
  */
 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
-	int err;
 	struct ib_cq *cq = wq->cq;
 	struct ib_pd *pd = wq->pd;
 
 	if (atomic_read(&wq->usecnt))
 		return -EBUSY;
 
-	err = wq->device->ops.destroy_wq(wq, udata);
-	if (!err) {
-		atomic_dec(&pd->usecnt);
-		atomic_dec(&cq->usecnt);
-	}
+	wq->device->ops.destroy_wq(wq, udata);
+
+	atomic_dec(&pd->usecnt);
+	atomic_dec(&cq->usecnt);
 
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(ib_destroy_wq);
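
Note that ib_destroy_wq() itself keeps its int return type: the core can still refuse the destroy with -EBUSY while the WQ usecnt is elevated; only the driver callback can no longer fail. A minimal caller sketch under that assumption (teardown_rx_wq() is a hypothetical helper, not part of this patch):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical in-kernel consumer (not from this patch).  After this change
 * the only error ib_destroy_wq() can return is -EBUSY from the usecnt check.
 */
static int teardown_rx_wq(struct ib_wq *wq, struct ib_udata *udata)
{
	int err;

	err = ib_destroy_wq(wq, udata);
	if (err)
		pr_warn("WQ still in use, destroy failed: %d\n", err);

	return err;
}
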
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -906,7 +906,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -4248,7 +4248,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
 	return err;
 }
 
-int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4259,8 +4259,6 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
 
 	kfree(qp);
-
-	return 0;
 }
 
 struct ib_rwq_ind_table
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1201,7 +1201,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -6047,7 +6047,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 	return ERR_PTR(err);
 }
 
-int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
@@ -6055,8 +6055,6 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
 	destroy_user_rq(dev, wq->pd, rwq, udata);
 	kfree(rwq);
-
-	return 0;
 }
 
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2509,7 +2509,7 @@ struct ib_device_ops {
 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
 				   struct ib_wq_init_attr *init_attr,
 				   struct ib_udata *udata);
-	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
 			 u32 wq_attr_mask, struct ib_udata *udata);
 	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
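
On the provider side, each destroy_wq implementation now follows the void contract shown above for mlx4 and mlx5. Below is a minimal sketch of how a driver would wire this up under the ib_device_ops layout from this patch; my_wq, my_destroy_wq and my_dev_ops are hypothetical names, not taken from any in-tree driver:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver-private WQ wrapper. */
struct my_wq {
	struct ib_wq ibwq;
	/* hardware queue state would live here */
};

/*
 * The op must now release everything unconditionally; there is no way to
 * report failure back to the core anymore.
 */
static void my_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
	struct my_wq *wq = container_of(ibwq, struct my_wq, ibwq);

	kfree(wq);
}

static const struct ib_device_ops my_dev_ops = {
	/* ... other ops ... */
	.destroy_wq = my_destroy_wq,
};

Since the callback returns void, any hardware teardown problem has to be handled (or at least logged) inside the driver rather than propagated to the core.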