Commit bcf4c1ea authored by Matan Barak, committed by Doug Ledford

IB/core: Change provider's API of create_cq to be extendible

Add a new ib_cq_init_attr structure which carries the previously
separate cqe (minimum number of CQ entries) and comp_vector
(completion vector) parameters, plus a new flags field.
All vendors' create_cq callbacks are changed to work with the
new API.

This commit does not change any functionality.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-By: Devesh Sharma <devesh.sharma@avagotech.com> to patch #2
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 74217d4c
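
Before the per-file hunks, a minimal stand-alone sketch (plain user-space C, not kernel code) of the calling convention this commit introduces: the caller packs cqe and comp_vector into a struct ib_cq_init_attr, and each provider unpacks them and rejects any flags bits it does not understand. The struct fields mirror the ib_cq_init_attr definition added at the end of this diff; provider_create_cq and main are hypothetical scaffolding for illustration only.

#include <stdio.h>
#include <errno.h>

struct ib_cq_init_attr {
	unsigned int cqe;         /* minimum number of CQ entries */
	int          comp_vector; /* completion vector */
	unsigned int flags;       /* reserved; must be 0 for now */
};

/* What every driver's create_cq callback now does with its attr argument. */
static int provider_create_cq(const struct ib_cq_init_attr *attr)
{
	int entries = attr->cqe;
	int vector  = attr->comp_vector;

	if (attr->flags)          /* unknown extension bits -> refuse */
		return -EINVAL;

	printf("create CQ: %d entries, completion vector %d\n", entries, vector);
	return 0;
}

int main(void)
{
	/* Caller side: designated initializer zeroes future fields by default. */
	struct ib_cq_init_attr attr = { .cqe = 64, .comp_vector = 0 };

	return provider_create_cq(&attr) ? 1 : 0;
}

Because extensions are signalled through the structure rather than through new function parameters, a later feature only needs a new field or flag bit plus handling in the drivers that care; drivers that do not know about it keep returning -EINVAL for requests they cannot honour.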
@@ -1341,6 +1341,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	struct ib_uverbs_event_file *ev_file = NULL;
 	struct ib_cq *cq;
 	int ret;
+	struct ib_cq_init_attr attr = {};
 
 	if (out_len < sizeof resp)
 		return -ENOSPC;
@@ -1376,8 +1377,9 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&obj->comp_list);
 	INIT_LIST_HEAD(&obj->async_list);
 
-	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
-					     cmd.comp_vector,
+	attr.cqe = cmd.cqe;
+	attr.comp_vector = cmd.comp_vector;
+	cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr,
 					     file->ucontext, &udata);
 	if (IS_ERR(cq)) {
 		ret = PTR_ERR(cq);
...
@@ -1079,8 +1079,9 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
 			   void *cq_context, int cqe, int comp_vector)
 {
 	struct ib_cq *cq;
+	struct ib_cq_init_attr attr = {.cqe = cqe, .comp_vector = comp_vector};
 
-	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
+	cq = device->create_cq(device, &attr, NULL, NULL);
 
 	if (!IS_ERR(cq)) {
 		cq->device = device;
...
@@ -286,13 +286,18 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
 	return 0;
 }
 
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
+static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
+				  const struct ib_cq_init_attr *attr,
 				  struct ib_ucontext *context,
 				  struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	struct c2_cq *cq;
 	int err;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq) {
 		pr_debug("%s: Unable to allocate CQ\n", __func__);
...
@@ -138,10 +138,12 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
 	return 0;
 }
 
-static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
+static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
+				    const struct ib_cq_init_attr *attr,
 				    struct ib_ucontext *ib_context,
 				    struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	struct iwch_dev *rhp;
 	struct iwch_cq *chp;
 	struct iwch_create_cq_resp uresp;
@@ -151,6 +153,9 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
 	size_t resplen;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	rhp = to_iwch_dev(ibdev);
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
...
@@ -864,10 +864,13 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	return 0;
 }
 
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
-			     int vector, struct ib_ucontext *ib_context,
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
+			     const struct ib_cq_init_attr *attr,
+			     struct ib_ucontext *ib_context,
 			     struct ib_udata *udata)
 {
+	int entries = attr->cqe;
+	int vector = attr->comp_vector;
 	struct c4iw_dev *rhp;
 	struct c4iw_cq *chp;
 	struct c4iw_create_cq_resp uresp;
@@ -877,6 +880,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	struct c4iw_mm_entry *mm, *mm2;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
 
 	rhp = to_c4iw_dev(ibdev);
...
@@ -990,8 +990,8 @@ int c4iw_reregister_phys_mem(struct ib_mr *mr,
 			     int acc, u64 *iova_start);
 int c4iw_dereg_mr(struct ib_mr *ib_mr);
 int c4iw_destroy_cq(struct ib_cq *ib_cq);
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
-			     int vector,
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
+			     const struct ib_cq_init_attr *attr,
 			     struct ib_ucontext *ib_context,
 			     struct ib_udata *udata);
 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
...
@@ -113,10 +113,12 @@ struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
 	return ret;
 }
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
+struct ib_cq *ehca_create_cq(struct ib_device *device,
+			     const struct ib_cq_init_attr *attr,
 			     struct ib_ucontext *context,
 			     struct ib_udata *udata)
 {
+	int cqe = attr->cqe;
 	static const u32 additional_cqe = 20;
 	struct ib_cq *cq;
 	struct ehca_cq *my_cq;
@@ -131,6 +133,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	int ipz_rc, i;
 	unsigned long flags;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
 		return ERR_PTR(-EINVAL);
...
@@ -129,7 +129,8 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
+struct ib_cq *ehca_create_cq(struct ib_device *device,
+			     const struct ib_cq_init_attr *attr,
 			     struct ib_ucontext *context,
 			     struct ib_udata *udata);
...
@@ -188,7 +188,7 @@ static void send_complete(unsigned long data)
 /**
  * ipath_create_cq - create a completion queue
  * @ibdev: the device this completion queue is attached to
- * @entries: the minimum size of the completion queue
+ * @attr: creation attributes
  * @context: unused by the InfiniPath driver
  * @udata: unused by the InfiniPath driver
  *
@@ -197,16 +197,21 @@ static void send_complete(unsigned long data)
  *
  * Called by ib_create_cq() in the generic verbs code.
  */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
+			      const struct ib_cq_init_attr *attr,
 			      struct ib_ucontext *context,
 			      struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	struct ipath_ibdev *dev = to_idev(ibdev);
 	struct ipath_cq *cq;
 	struct ipath_cq_wc *wc;
 	struct ib_cq *ret;
 	u32 sz;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (entries < 1 || entries > ib_ipath_max_cqes) {
 		ret = ERR_PTR(-EINVAL);
 		goto done;
...
@@ -807,7 +807,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
 
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
+			      const struct ib_cq_init_attr *attr,
 			      struct ib_ucontext *context,
 			      struct ib_udata *udata);
...
@@ -166,15 +166,21 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
 	return err;
 }
 
-struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
+struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
+				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
+	int entries = attr->cqe;
+	int vector = attr->comp_vector;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_cq *cq;
 	struct mlx4_uar *uar;
 	int err;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (entries < 1 || entries > dev->dev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);
...
@@ -668,7 +668,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
-struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
+struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
+				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
 				struct ib_udata *udata);
 int mlx4_ib_destroy_cq(struct ib_cq *cq);
...
@@ -736,10 +736,13 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 	mlx5_db_free(dev->mdev, &cq->db);
 }
 
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
-				int vector, struct ib_ucontext *context,
+struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
+				const struct ib_cq_init_attr *attr,
+				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
+	int entries = attr->cqe;
+	int vector = attr->comp_vector;
 	struct mlx5_create_cq_mbox_in *cqb = NULL;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_cq *cq;
@@ -750,6 +753,9 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	int eqn;
 	int err;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (entries < 0)
 		return ERR_PTR(-EINVAL);
...
@@ -1087,6 +1087,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
 	struct ib_srq_init_attr attr;
 	struct mlx5_ib_dev *dev;
+	struct ib_cq_init_attr cq_attr = {.cqe = 1};
 	int ret = 0;
 
 	dev = container_of(devr, struct mlx5_ib_dev, devr);
@@ -1100,7 +1101,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
 	devr->p0->uobject = NULL;
 	atomic_set(&devr->p0->usecnt, 0);
 
-	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
+	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
 	if (IS_ERR(devr->c0)) {
 		ret = PTR_ERR(devr->c0);
 		goto error1;
...
@@ -556,8 +556,9 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
 			  void *buffer, u32 length);
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
-				int vector, struct ib_ucontext *context,
+struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
+				const struct ib_cq_init_attr *attr,
+				struct ib_ucontext *context,
 				struct ib_udata *udata);
 int mlx5_ib_destroy_cq(struct ib_cq *cq);
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
...
@@ -641,16 +641,20 @@ static int mthca_destroy_qp(struct ib_qp *qp)
 	return 0;
 }
 
-static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
-				     int comp_vector,
+static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
+				     const struct ib_cq_init_attr *attr,
 				     struct ib_ucontext *context,
 				     struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	struct mthca_create_cq ucmd;
 	struct mthca_cq *cq;
 	int nent;
 	int err;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
 		return ERR_PTR(-EINVAL);
...
@@ -1526,10 +1526,12 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
 /**
  * nes_create_cq
  */
-static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
-		int comp_vector,
-		struct ib_ucontext *context, struct ib_udata *udata)
+static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
+				   const struct ib_cq_init_attr *attr,
+				   struct ib_ucontext *context,
+				   struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	u64 u64temp;
 	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -1549,6 +1551,9 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
 	unsigned long flags;
 	int ret;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (entries > nesadapter->max_cqe)
 		return ERR_PTR(-EINVAL);
...
@@ -1004,10 +1004,12 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 	return status;
 }
 
-struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
+struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
+			       const struct ib_cq_init_attr *attr,
 			       struct ib_ucontext *ib_ctx,
 			       struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	struct ocrdma_cq *cq;
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
 	struct ocrdma_ucontext *uctx = NULL;
@@ -1015,6 +1017,9 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
 	int status;
 	struct ocrdma_create_cq_ureq ureq;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (udata) {
 		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
 			return ERR_PTR(-EFAULT);
...
@@ -59,8 +59,10 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
 			      struct ib_ucontext *, struct ib_udata *);
 int ocrdma_dealloc_pd(struct ib_pd *pd);
 
-struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
-			       struct ib_ucontext *, struct ib_udata *);
+struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
+			       const struct ib_cq_init_attr *attr,
+			       struct ib_ucontext *ib_ctx,
+			       struct ib_udata *udata);
 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
 int ocrdma_destroy_cq(struct ib_cq *);
...
@@ -203,7 +203,7 @@ static void send_complete(struct kthread_work *work)
 /**
  * qib_create_cq - create a completion queue
  * @ibdev: the device this completion queue is attached to
- * @entries: the minimum size of the completion queue
+ * @attr: creation attributes
  * @context: unused by the QLogic_IB driver
  * @udata: user data for libibverbs.so
 *
@@ -212,16 +212,21 @@ static void send_complete(struct kthread_work *work)
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
-struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
-			    int comp_vector, struct ib_ucontext *context,
+struct ib_cq *qib_create_cq(struct ib_device *ibdev,
+			    const struct ib_cq_init_attr *attr,
+			    struct ib_ucontext *context,
 			    struct ib_udata *udata)
 {
+	int entries = attr->cqe;
 	struct qib_ibdev *dev = to_idev(ibdev);
 	struct qib_cq *cq;
 	struct qib_cq_wc *wc;
 	struct ib_cq *ret;
 	u32 sz;
 
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	if (entries < 1 || entries > ib_qib_max_cqes) {
 		ret = ERR_PTR(-EINVAL);
 		goto done;
...
@@ -1007,8 +1007,9 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
 
-struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
-			    int comp_vector, struct ib_ucontext *context,
+struct ib_cq *qib_create_cq(struct ib_device *ibdev,
+			    const struct ib_cq_init_attr *attr,
+			    struct ib_ucontext *context,
 			    struct ib_udata *udata);
 int qib_destroy_cq(struct ib_cq *ibcq);
...
@@ -570,13 +570,17 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return status;
 }
 
-struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
-				 int vector, struct ib_ucontext *context,
+struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
+				 const struct ib_cq_init_attr *attr,
+				 struct ib_ucontext *context,
 				 struct ib_udata *udata)
 {
 	struct ib_cq *cq;
 
 	usnic_dbg("\n");
+	if (attr->flags)
+		return ERR_PTR(-EINVAL);
+
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq)
 		return ERR_PTR(-EBUSY);
...
@@ -46,8 +46,9 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
 int usnic_ib_destroy_qp(struct ib_qp *qp);
 int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       int attr_mask, struct ib_udata *udata);
-struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
-				 int vector, struct ib_ucontext *context,
+struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
+				 const struct ib_cq_init_attr *attr,
+				 struct ib_ucontext *context,
 				 struct ib_udata *udata);
 int usnic_ib_destroy_cq(struct ib_cq *cq);
 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
...
@@ -173,6 +173,12 @@ struct ib_odp_caps {
 	} per_transport_caps;
 };
 
+struct ib_cq_init_attr {
+	unsigned int	cqe;
+	int		comp_vector;
+	u32		flags;
+};
+
 struct ib_device_attr {
 	u64		fw_ver;
 	__be64		sys_image_guid;
@@ -1613,8 +1619,8 @@ struct ib_device {
 	int		(*post_recv)(struct ib_qp *qp,
 				     struct ib_recv_wr *recv_wr,
 				     struct ib_recv_wr **bad_recv_wr);
-	struct ib_cq *	(*create_cq)(struct ib_device *device, int cqe,
-				     int comp_vector,
+	struct ib_cq *	(*create_cq)(struct ib_device *device,
+				     const struct ib_cq_init_attr *attr,
 				     struct ib_ucontext *context,
 				     struct ib_udata *udata);
 	int		(*modify_cq)(struct ib_cq *cq, u16 cq_count,
...