Commit 8e37210b authored by Matan Barak's avatar Matan Barak Committed by Doug Ledford

IB/core: Change ib_create_cq to use struct ib_cq_init_attr

Currently, ib_create_cq uses cqe and comp_vector instead
of the extensible ib_cq_init_attr struct.

Earlier patches already changed the vendors to work with
ib_cq_init_attr. This patch changes the consumers too.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent bcf4c1ea
...@@ -2923,6 +2923,7 @@ static int ib_mad_port_open(struct ib_device *device, ...@@ -2923,6 +2923,7 @@ static int ib_mad_port_open(struct ib_device *device,
unsigned long flags; unsigned long flags;
char name[sizeof "ib_mad123"]; char name[sizeof "ib_mad123"];
int has_smi; int has_smi;
struct ib_cq_init_attr cq_attr = {};
/* Create new device info */ /* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
...@@ -2943,9 +2944,10 @@ static int ib_mad_port_open(struct ib_device *device, ...@@ -2943,9 +2944,10 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi) if (has_smi)
cq_size *= 2; cq_size *= 2;
cq_attr.cqe = cq_size;
port_priv->cq = ib_create_cq(port_priv->device, port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler, ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0); NULL, port_priv, &cq_attr);
if (IS_ERR(port_priv->cq)) { if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq); ret = PTR_ERR(port_priv->cq);
......
...@@ -1076,12 +1076,12 @@ EXPORT_SYMBOL(ib_destroy_qp); ...@@ -1076,12 +1076,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
struct ib_cq *ib_create_cq(struct ib_device *device, struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler, ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *), void (*event_handler)(struct ib_event *, void *),
void *cq_context, int cqe, int comp_vector) void *cq_context,
const struct ib_cq_init_attr *cq_attr)
{ {
struct ib_cq *cq; struct ib_cq *cq;
struct ib_cq_init_attr attr = {.cqe = cqe, .comp_vector = comp_vector};
cq = device->create_cq(device, &attr, NULL, NULL); cq = device->create_cq(device, cq_attr, NULL, NULL);
if (!IS_ERR(cq)) { if (!IS_ERR(cq)) {
cq->device = device; cq->device = device;
......
...@@ -552,6 +552,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) ...@@ -552,6 +552,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
struct ib_cq *ibcq; struct ib_cq *ibcq;
struct ib_qp *ibqp; struct ib_qp *ibqp;
struct ib_qp_init_attr qp_init_attr; struct ib_qp_init_attr qp_init_attr;
struct ib_cq_init_attr cq_attr = {};
int ret; int ret;
if (sport->ibcq_aqp1) { if (sport->ibcq_aqp1) {
...@@ -559,7 +560,9 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) ...@@ -559,7 +560,9 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
return -EPERM; return -EPERM;
} }
ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0); cq_attr.cqe = 10;
ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
&cq_attr);
if (IS_ERR(ibcq)) { if (IS_ERR(ibcq)) {
ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
return PTR_ERR(ibcq); return PTR_ERR(ibcq);
......
...@@ -1774,6 +1774,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, ...@@ -1774,6 +1774,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{ {
int ret, cq_size; int ret, cq_size;
struct ib_cq_init_attr cq_attr = {};
if (ctx->state != DEMUX_PV_STATE_DOWN) if (ctx->state != DEMUX_PV_STATE_DOWN)
return -EEXIST; return -EEXIST;
...@@ -1802,8 +1803,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, ...@@ -1802,8 +1803,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
if (ctx->has_smi) if (ctx->has_smi)
cq_size *= 2; cq_size *= 2;
cq_attr.cqe = cq_size;
ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
NULL, ctx, cq_size, 0); NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) { if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq); ret = PTR_ERR(ctx->cq);
pr_err("Couldn't create tunnel CQ (%d)\n", ret); pr_err("Couldn't create tunnel CQ (%d)\n", ret);
......
...@@ -758,6 +758,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, ...@@ -758,6 +758,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct mlx4_ib_xrcd *xrcd; struct mlx4_ib_xrcd *xrcd;
struct ib_cq_init_attr cq_attr = {};
int err; int err;
if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
...@@ -777,7 +778,8 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, ...@@ -777,7 +778,8 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
goto err2; goto err2;
} }
xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0); cq_attr.cqe = 1;
xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
if (IS_ERR(xrcd->cq)) { if (IS_ERR(xrcd->cq)) {
err = PTR_ERR(xrcd->cq); err = PTR_ERR(xrcd->cq);
goto err3; goto err3;
......
...@@ -971,6 +971,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev) ...@@ -971,6 +971,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
struct ib_cq *cq; struct ib_cq *cq;
struct ib_qp *qp; struct ib_qp *qp;
struct ib_mr *mr; struct ib_mr *mr;
struct ib_cq_init_attr cq_attr = {};
int ret; int ret;
attr = kzalloc(sizeof(*attr), GFP_KERNEL); attr = kzalloc(sizeof(*attr), GFP_KERNEL);
...@@ -994,8 +995,9 @@ static int create_umr_res(struct mlx5_ib_dev *dev) ...@@ -994,8 +995,9 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
goto error_1; goto error_1;
} }
cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128, cq_attr.cqe = 128;
0); cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
&cq_attr);
if (IS_ERR(cq)) { if (IS_ERR(cq)) {
mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
ret = PTR_ERR(cq); ret = PTR_ERR(cq);
......
...@@ -141,6 +141,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) ...@@ -141,6 +141,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.sq_sig_type = IB_SIGNAL_ALL_WR, .sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_UD .qp_type = IB_QPT_UD
}; };
struct ib_cq_init_attr cq_attr = {};
int ret, size; int ret, size;
int i; int i;
...@@ -178,14 +179,17 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) ...@@ -178,14 +179,17 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
} else } else
goto out_free_wq; goto out_free_wq;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); cq_attr.cqe = size;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
dev, &cq_attr);
if (IS_ERR(priv->recv_cq)) { if (IS_ERR(priv->recv_cq)) {
printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name); printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup; goto out_cm_dev_cleanup;
} }
cq_attr.cqe = ipoib_sendq_size;
priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
dev, ipoib_sendq_size, 0); dev, &cq_attr);
if (IS_ERR(priv->send_cq)) { if (IS_ERR(priv->send_cq)) {
printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name); printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq; goto out_free_recv_cq;
......
...@@ -126,14 +126,17 @@ static int iser_create_device_ib_res(struct iser_device *device) ...@@ -126,14 +126,17 @@ static int iser_create_device_ib_res(struct iser_device *device)
goto pd_err; goto pd_err;
for (i = 0; i < device->comps_used; i++) { for (i = 0; i < device->comps_used; i++) {
struct ib_cq_init_attr cq_attr = {};
struct iser_comp *comp = &device->comps[i]; struct iser_comp *comp = &device->comps[i];
comp->device = device; comp->device = device;
cq_attr.cqe = max_cqe;
cq_attr.comp_vector = i;
comp->cq = ib_create_cq(device->ib_device, comp->cq = ib_create_cq(device->ib_device,
iser_cq_callback, iser_cq_callback,
iser_cq_event_callback, iser_cq_event_callback,
(void *)comp, (void *)comp,
max_cqe, i); &cq_attr);
if (IS_ERR(comp->cq)) { if (IS_ERR(comp->cq)) {
comp->cq = NULL; comp->cq = NULL;
goto cq_err; goto cq_err;
......
...@@ -318,15 +318,18 @@ isert_alloc_comps(struct isert_device *device, ...@@ -318,15 +318,18 @@ isert_alloc_comps(struct isert_device *device,
max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe); max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
for (i = 0; i < device->comps_used; i++) { for (i = 0; i < device->comps_used; i++) {
struct ib_cq_init_attr cq_attr = {};
struct isert_comp *comp = &device->comps[i]; struct isert_comp *comp = &device->comps[i];
comp->device = device; comp->device = device;
INIT_WORK(&comp->work, isert_cq_work); INIT_WORK(&comp->work, isert_cq_work);
cq_attr.cqe = max_cqe;
cq_attr.comp_vector = i;
comp->cq = ib_create_cq(device->ib_device, comp->cq = ib_create_cq(device->ib_device,
isert_cq_callback, isert_cq_callback,
isert_cq_event_callback, isert_cq_event_callback,
(void *)comp, (void *)comp,
max_cqe, i); &cq_attr);
if (IS_ERR(comp->cq)) { if (IS_ERR(comp->cq)) {
isert_err("Unable to allocate cq\n"); isert_err("Unable to allocate cq\n");
ret = PTR_ERR(comp->cq); ret = PTR_ERR(comp->cq);
......
...@@ -500,6 +500,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) ...@@ -500,6 +500,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_fmr_pool *fmr_pool = NULL; struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL; struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg; const int m = 1 + dev->use_fast_reg;
struct ib_cq_init_attr cq_attr = {};
int ret; int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
...@@ -507,15 +508,19 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) ...@@ -507,15 +508,19 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
return -ENOMEM; return -ENOMEM;
/* + 1 for SRP_LAST_WR_ID */ /* + 1 for SRP_LAST_WR_ID */
cq_attr.cqe = target->queue_size + 1;
cq_attr.comp_vector = ch->comp_vector;
recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
target->queue_size + 1, ch->comp_vector); &cq_attr);
if (IS_ERR(recv_cq)) { if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq); ret = PTR_ERR(recv_cq);
goto err; goto err;
} }
cq_attr.cqe = m * target->queue_size;
cq_attr.comp_vector = ch->comp_vector;
send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
m * target->queue_size, ch->comp_vector); &cq_attr);
if (IS_ERR(send_cq)) { if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq); ret = PTR_ERR(send_cq);
goto err_recv_cq; goto err_recv_cq;
......
...@@ -2080,6 +2080,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ...@@ -2080,6 +2080,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_port *sport = ch->sport; struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev; struct srpt_device *sdev = sport->sdev;
u32 srp_sq_size = sport->port_attrib.srp_sq_size; u32 srp_sq_size = sport->port_attrib.srp_sq_size;
struct ib_cq_init_attr cq_attr = {};
int ret; int ret;
WARN_ON(ch->rq_size < 1); WARN_ON(ch->rq_size < 1);
...@@ -2090,8 +2091,9 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ...@@ -2090,8 +2091,9 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
goto out; goto out;
retry: retry:
cq_attr.cqe = ch->rq_size + srp_sq_size;
ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
ch->rq_size + srp_sq_size, 0); &cq_attr);
if (IS_ERR(ch->cq)) { if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq); ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n", pr_err("failed to create CQ cqe= %d ret= %d\n",
......
...@@ -647,6 +647,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, ...@@ -647,6 +647,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
kib_dev_t *dev; kib_dev_t *dev;
struct ib_qp_init_attr *init_qp_attr; struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched; struct kib_sched_info *sched;
struct ib_cq_init_attr cq_attr = {};
kib_conn_t *conn; kib_conn_t *conn;
struct ib_cq *cq; struct ib_cq *cq;
unsigned long flags; unsigned long flags;
...@@ -742,10 +743,11 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, ...@@ -742,10 +743,11 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
kiblnd_map_rx_descs(conn); kiblnd_map_rx_descs(conn);
cq_attr.cqe = IBLND_CQ_ENTRIES(version);
cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
cq = ib_create_cq(cmid->device, cq = ib_create_cq(cmid->device,
kiblnd_cq_completion, kiblnd_cq_event, conn, kiblnd_cq_completion, kiblnd_cq_event, conn,
IBLND_CQ_ENTRIES(version), &cq_attr);
kiblnd_get_completion_vector(conn, cpt));
if (IS_ERR(cq)) { if (IS_ERR(cq)) {
CERROR("Can't create CQ: %ld, cqe: %d\n", CERROR("Can't create CQ: %ld, cqe: %d\n",
PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
......
...@@ -2314,16 +2314,15 @@ static inline int ib_post_recv(struct ib_qp *qp, ...@@ -2314,16 +2314,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
* asynchronous event not associated with a completion occurs on the CQ. * asynchronous event not associated with a completion occurs on the CQ.
* @cq_context: Context associated with the CQ returned to the user via * @cq_context: Context associated with the CQ returned to the user via
* the associated completion and event handlers. * the associated completion and event handlers.
* @cqe: The minimum size of the CQ. * @cq_attr: The attributes the CQ should be created upon.
* @comp_vector - Completion vector used to signal completion events.
* Must be >= 0 and < context->num_comp_vectors.
* *
* Users can examine the cq structure to determine the actual CQ size. * Users can examine the cq structure to determine the actual CQ size.
*/ */
struct ib_cq *ib_create_cq(struct ib_device *device, struct ib_cq *ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler, ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *), void (*event_handler)(struct ib_event *, void *),
void *cq_context, int cqe, int comp_vector); void *cq_context,
const struct ib_cq_init_attr *cq_attr);
/** /**
* ib_resize_cq - Modifies the capacity of the CQ. * ib_resize_cq - Modifies the capacity of the CQ.
......
...@@ -648,6 +648,7 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) ...@@ -648,6 +648,7 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
struct rdma_conn_param conn_param; struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr; struct ib_qp_init_attr qp_attr;
struct ib_device_attr devattr; struct ib_device_attr devattr;
struct ib_cq_init_attr cq_attr = {};
/* Parse the transport specific mount options */ /* Parse the transport specific mount options */
err = parse_opts(args, &opts); err = parse_opts(args, &opts);
...@@ -705,9 +706,10 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) ...@@ -705,9 +706,10 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
goto error; goto error;
/* Create the Completion Queue */ /* Create the Completion Queue */
cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
cq_event_handler, client, cq_event_handler, client,
opts.sq_depth + opts.rq_depth + 1, 0); &cq_attr);
if (IS_ERR(rdma->cq)) if (IS_ERR(rdma->cq))
goto error; goto error;
ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
......
...@@ -247,6 +247,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ...@@ -247,6 +247,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device; struct ib_device *dev = ic->i_cm_id->device;
struct ib_qp_init_attr attr; struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev; struct rds_ib_device *rds_ibdev;
int ret; int ret;
...@@ -270,9 +271,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ...@@ -270,9 +271,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_pd = rds_ibdev->pd; ic->i_pd = rds_ibdev->pd;
ic->i_mr = rds_ibdev->mr; ic->i_mr = rds_ibdev->mr;
cq_attr.cqe = ic->i_send_ring.w_nr + 1;
ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler, ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
rds_ib_cq_event_handler, conn, rds_ib_cq_event_handler, conn,
ic->i_send_ring.w_nr + 1, 0); &cq_attr);
if (IS_ERR(ic->i_send_cq)) { if (IS_ERR(ic->i_send_cq)) {
ret = PTR_ERR(ic->i_send_cq); ret = PTR_ERR(ic->i_send_cq);
ic->i_send_cq = NULL; ic->i_send_cq = NULL;
...@@ -280,9 +282,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ...@@ -280,9 +282,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
goto out; goto out;
} }
cq_attr.cqe = ic->i_recv_ring.w_nr;
ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler, ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
rds_ib_cq_event_handler, conn, rds_ib_cq_event_handler, conn,
ic->i_recv_ring.w_nr, 0); &cq_attr);
if (IS_ERR(ic->i_recv_cq)) { if (IS_ERR(ic->i_recv_cq)) {
ret = PTR_ERR(ic->i_recv_cq); ret = PTR_ERR(ic->i_recv_cq);
ic->i_recv_cq = NULL; ic->i_recv_cq = NULL;
......
...@@ -179,6 +179,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr, ...@@ -179,6 +179,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
void *context) void *context)
{ {
struct ib_device *dev = rds_iwdev->dev; struct ib_device *dev = rds_iwdev->dev;
struct ib_cq_init_attr cq_attr = {};
unsigned int send_size, recv_size; unsigned int send_size, recv_size;
int ret; int ret;
...@@ -198,9 +199,10 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr, ...@@ -198,9 +199,10 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
attr->sq_sig_type = IB_SIGNAL_REQ_WR; attr->sq_sig_type = IB_SIGNAL_REQ_WR;
attr->qp_type = IB_QPT_RC; attr->qp_type = IB_QPT_RC;
cq_attr.cqe = send_size;
attr->send_cq = ib_create_cq(dev, send_cq_handler, attr->send_cq = ib_create_cq(dev, send_cq_handler,
rds_iw_cq_event_handler, rds_iw_cq_event_handler,
context, send_size, 0); context, &cq_attr);
if (IS_ERR(attr->send_cq)) { if (IS_ERR(attr->send_cq)) {
ret = PTR_ERR(attr->send_cq); ret = PTR_ERR(attr->send_cq);
attr->send_cq = NULL; attr->send_cq = NULL;
...@@ -208,9 +210,10 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr, ...@@ -208,9 +210,10 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
goto out; goto out;
} }
cq_attr.cqe = recv_size;
attr->recv_cq = ib_create_cq(dev, recv_cq_handler, attr->recv_cq = ib_create_cq(dev, recv_cq_handler,
rds_iw_cq_event_handler, rds_iw_cq_event_handler,
context, recv_size, 0); context, &cq_attr);
if (IS_ERR(attr->recv_cq)) { if (IS_ERR(attr->recv_cq)) {
ret = PTR_ERR(attr->recv_cq); ret = PTR_ERR(attr->recv_cq);
attr->recv_cq = NULL; attr->recv_cq = NULL;
......
...@@ -855,6 +855,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -855,6 +855,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
struct svcxprt_rdma *listen_rdma; struct svcxprt_rdma *listen_rdma;
struct svcxprt_rdma *newxprt = NULL; struct svcxprt_rdma *newxprt = NULL;
struct rdma_conn_param conn_param; struct rdma_conn_param conn_param;
struct ib_cq_init_attr cq_attr = {};
struct ib_qp_init_attr qp_attr; struct ib_qp_init_attr qp_attr;
struct ib_device_attr devattr; struct ib_device_attr devattr;
int uninitialized_var(dma_mr_acc); int uninitialized_var(dma_mr_acc);
...@@ -907,22 +908,22 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -907,22 +908,22 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dprintk("svcrdma: error creating PD for connect request\n"); dprintk("svcrdma: error creating PD for connect request\n");
goto errout; goto errout;
} }
cq_attr.cqe = newxprt->sc_sq_depth;
newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device, newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
sq_comp_handler, sq_comp_handler,
cq_event_handler, cq_event_handler,
newxprt, newxprt,
newxprt->sc_sq_depth, &cq_attr);
0);
if (IS_ERR(newxprt->sc_sq_cq)) { if (IS_ERR(newxprt->sc_sq_cq)) {
dprintk("svcrdma: error creating SQ CQ for connect request\n"); dprintk("svcrdma: error creating SQ CQ for connect request\n");
goto errout; goto errout;
} }
cq_attr.cqe = newxprt->sc_max_requests;
newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device, newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
rq_comp_handler, rq_comp_handler,
cq_event_handler, cq_event_handler,
newxprt, newxprt,
newxprt->sc_max_requests, &cq_attr);
0);
if (IS_ERR(newxprt->sc_rq_cq)) { if (IS_ERR(newxprt->sc_rq_cq)) {
dprintk("svcrdma: error creating RQ CQ for connect request\n"); dprintk("svcrdma: error creating RQ CQ for connect request\n");
goto errout; goto errout;
......
...@@ -644,6 +644,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, ...@@ -644,6 +644,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
{ {
struct ib_device_attr *devattr = &ia->ri_devattr; struct ib_device_attr *devattr = &ia->ri_devattr;
struct ib_cq *sendcq, *recvcq; struct ib_cq *sendcq, *recvcq;
struct ib_cq_init_attr cq_attr = {};
int rc, err; int rc, err;
/* check provider's send/recv wr limits */ /* check provider's send/recv wr limits */
...@@ -691,9 +692,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, ...@@ -691,9 +692,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
init_waitqueue_head(&ep->rep_connect_wait); init_waitqueue_head(&ep->rep_connect_wait);
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall, sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
rpcrdma_cq_async_error_upcall, ep, rpcrdma_cq_async_error_upcall, ep, &cq_attr);
ep->rep_attr.cap.max_send_wr + 1, 0);
if (IS_ERR(sendcq)) { if (IS_ERR(sendcq)) {
rc = PTR_ERR(sendcq); rc = PTR_ERR(sendcq);
dprintk("RPC: %s: failed to create send CQ: %i\n", dprintk("RPC: %s: failed to create send CQ: %i\n",
...@@ -708,9 +709,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, ...@@ -708,9 +709,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
goto out2; goto out2;
} }
cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall, recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
rpcrdma_cq_async_error_upcall, ep, rpcrdma_cq_async_error_upcall, ep, &cq_attr);
ep->rep_attr.cap.max_recv_wr + 1, 0);
if (IS_ERR(recvcq)) { if (IS_ERR(recvcq)) {
rc = PTR_ERR(recvcq); rc = PTR_ERR(recvcq);
dprintk("RPC: %s: failed to create recv CQ: %i\n", dprintk("RPC: %s: failed to create recv CQ: %i\n",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment