Commit 919dce24 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "The majority of the patches are cleanups, refactorings and clarity
  improvements.

  This cycle saw some more activity from Syzkaller; I think we are now
  clean on all but one of those bugs, including the long-standing and
  obnoxious rdma_cm locking design defect. We continue to see many
  drivers getting cleanups, along with a few new user-visible features.

  Summary:

   - Various driver updates for siw, bnxt_re, rxe, efa, mlx5, hfi1

   - Lots of cleanup patches for hns

   - Convert more places to use refcount

   - Aggressively lock the RDMA CM code that syzkaller says isn't
     working

   - Work to clarify ib_cm

   - Use the new ib_device lifecycle model in bnxt_re

   - Fix mlx5's MR cache which seems to be failing more often with the
     new ODP code

   - mlx5 'dynamic uar' and 'tx steering' user interfaces"
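A note on the "convert more places to use refcount" item: atomic_t offers no protection against overflow or a stray increment after the count has hit zero, while refcount_t saturates and warns on misuse. A minimal sketch of the conversion pattern these patches apply, using a hypothetical foo_ctx structure (not a type from this series):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo_ctx {
		refcount_t ref;			/* was: atomic_t ref; */
	};

	static void foo_ctx_init(struct foo_ctx *ctx)
	{
		refcount_set(&ctx->ref, 1);	/* was: atomic_set() */
	}

	static void foo_ctx_get(struct foo_ctx *ctx)
	{
		refcount_inc(&ctx->ref);	/* was: atomic_inc(); warns if ref was 0 */
	}

	static void foo_ctx_put(struct foo_ctx *ctx)
	{
		/* was: atomic_dec_and_test(); refcount_t traps underflow */
		if (refcount_dec_and_test(&ctx->ref))
			kfree(ctx);
	}

The rdma_id_private and ucma.c hunks below are exactly this transformation.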

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (144 commits)
  RDMA/bnxt_re: make bnxt_re_ib_init static
  IB/qib: Delete struct qib_ivdev.qp_rnd
  RDMA/hns: Fix uninitialized variable bug
  RDMA/hns: Modify the mask of QP number for CQE of hip08
  RDMA/hns: Reduce the maximum number of extend SGE per WQE
  RDMA/hns: Reduce PFC frames in congestion scenarios
  RDMA/mlx5: Add support for RDMA TX flow table
  net/mlx5: Add support for RDMA TX steering
  IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
  IB/hfi1: Fix memory leaks in sysfs registration and unregistration
  IB/mlx5: Move to fully dynamic UAR mode once user space supports it
  IB/mlx5: Limit the scope of struct mlx5_bfreg_info to mlx5_ib
  IB/mlx5: Extend QP creation to get uar page index from user space
  IB/mlx5: Extend CQ creation to get uar page index from user space
  IB/mlx5: Expose UAR object and its alloc/destroy commands
  IB/hfi1: Get rid of a warning
  RDMA/hns: Remove redundant judgment of qp_type
  RDMA/hns: Remove redundant assignment of wc->smac when polling cq
  RDMA/hns: Remove redundant qpc setup operations
  RDMA/hns: Remove meaningless prints
  ...
parents 50a5de89 b4d8ddf8
@@ -46,7 +46,7 @@
 struct ib_pkey_cache {
 	int		table_len;
-	u16		table[0];
+	u16		table[];
 };
 
 struct ib_update_work {
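The table[0] change above is the first of several "[0] to []" conversions in this pull: the GCC zero-length-array idiom becomes a C99 flexible array member, which the compiler and the fortified memory helpers can bounds-check. Allocation math is unchanged; a minimal sketch with a hypothetical pkey_table struct:

	#include <linux/overflow.h>	/* struct_size() */
	#include <linux/slab.h>
	#include <linux/types.h>

	struct pkey_table {
		int table_len;
		u16 table[];	/* flexible array member, was: u16 table[0]; */
	};

	static struct pkey_table *pkey_table_alloc(int nents)
	{
		struct pkey_table *t;

		/* struct_size() = sizeof(*t) + nents * sizeof(t->table[0]),
		 * with overflow checking.
		 */
		t = kzalloc(struct_size(t, table, nents), GFP_KERNEL);
		if (t)
			t->table_len = nents;
		return t;
	}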
@@ -972,6 +972,23 @@ int rdma_query_gid(struct ib_device *device, u8 port_num,
 }
 EXPORT_SYMBOL(rdma_query_gid);
 
+/**
+ * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
+ * @attr: Pointer to the GID attribute
+ *
+ * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
+ * to the SGID attr. Callers are required to already be holding the reference
+ * to an existing GID entry.
+ *
+ * Returns the HW GID context.
+ */
+void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
+{
+	return container_of(attr, struct ib_gid_table_entry, attr)->context;
+}
+EXPORT_SYMBOL(rdma_read_gid_hw_context);
+
 /**
  * rdma_find_gid - Returns SGID attributes if the matching GID is found.
  * @device: The device to query.
......
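The new rdma_read_gid_hw_context() works because every ib_gid_attr the core hands out is embedded inside a private ib_gid_table_entry, so container_of() can walk back from the member to the enclosing entry. A generic sketch of the idiom with hypothetical types:

	#include <linux/kernel.h>	/* container_of() */

	struct attr {
		int index;
	};

	struct entry {
		void *context;
		struct attr attr;	/* embedded member handed to callers */
	};

	static void *read_context(const struct attr *a)
	{
		/* Only valid when 'a' really lives inside a struct entry,
		 * which is why callers must hold a reference to the entry.
		 */
		return container_of(a, struct entry, attr)->context;
	}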
This diff is collapsed.
@@ -94,7 +94,7 @@ static int cma_configfs_params_get(struct config_item *item,
 
 static void cma_configfs_params_put(struct cma_device *cma_dev)
 {
-	cma_deref_dev(cma_dev);
+	cma_dev_put(cma_dev);
 }
 
 static ssize_t default_roce_mode_show(struct config_item *item,
@@ -312,12 +312,12 @@ static struct config_group *make_cma_dev(struct config_group *group,
 	configfs_add_default_group(&cma_dev_group->ports_group,
 				   &cma_dev_group->device_group);
-	cma_deref_dev(cma_dev);
+	cma_dev_put(cma_dev);
 
 	return &cma_dev_group->device_group;
 
 fail:
 	if (cma_dev)
-		cma_deref_dev(cma_dev);
+		cma_dev_put(cma_dev);
 	kfree(cma_dev_group);
 	return ERR_PTR(err);
 }
......
@@ -66,7 +66,7 @@ struct rdma_id_private {
 
 	struct mutex		qp_mutex;
 	struct completion	comp;
-	atomic_t		refcount;
+	refcount_t		refcount;
 	struct mutex		handler_mutex;
 
 	int			backlog;
@@ -111,8 +111,8 @@ static inline void cma_configfs_exit(void)
 }
 #endif
 
-void cma_ref_dev(struct cma_device *dev);
-void cma_deref_dev(struct cma_device *dev);
+void cma_dev_get(struct cma_device *dev);
+void cma_dev_put(struct cma_device *dev);
 typedef bool (*cma_device_filter)(struct ib_device *, void *);
 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
 					     void *cookie);
......
@@ -79,13 +79,13 @@ struct ib_mad_private {
 	struct ib_mad_private_header header;
 	size_t mad_size;
 	struct ib_grh grh;
-	u8 mad[0];
+	u8 mad[];
 } __packed;
 
 struct ib_rmpp_segment {
 	struct list_head list;
 	u32 num;
-	u8 data[0];
+	u8 data[];
 };
 
 struct ib_mad_agent_private {
......
@@ -71,7 +71,7 @@ struct mcast_device {
 	struct ib_event_handler	event_handler;
 	int			start_port;
 	int			end_port;
-	struct mcast_port	port[0];
+	struct mcast_port	port[];
 };
 
 enum mcast_state {
......
@@ -391,13 +391,13 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		return -EINVAL;
 	}
 
-	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+	ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
 	if (!ret)
 		return -ENOMEM;
 	sg_cnt = ret;
 
 	if (prot_sg_cnt) {
-		ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+		ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
 		if (!ret) {
 			ret = -ENOMEM;
 			goto out_unmap_sg;
@@ -466,9 +466,9 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 	kfree(ctx->reg);
 out_unmap_prot_sg:
 	if (prot_sg_cnt)
-		ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+		rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
 out_unmap_sg:
-	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+	rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -628,9 +628,9 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
 	kfree(ctx->reg);
 
-	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 	if (prot_sg_cnt)
-		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+		rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
......
@@ -101,7 +101,7 @@ struct ib_sa_port {
 struct ib_sa_device {
 	int			start_port, end_port;
 	struct ib_event_handler	event_handler;
-	struct ib_sa_port	port[0];
+	struct ib_sa_port	port[];
 };
 
 struct ib_sa_query {
......
@@ -85,12 +85,13 @@ struct ucma_file {
 struct ucma_context {
 	u32			id;
 	struct completion	comp;
-	atomic_t		ref;
+	refcount_t		ref;
 	int			events_reported;
 	int			backlog;
 
 	struct ucma_file	*file;
 	struct rdma_cm_id	*cm_id;
+	struct mutex		mutex;
 	u64			uid;
 
 	struct list_head	list;
@@ -152,7 +153,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
 		if (ctx->closing)
 			ctx = ERR_PTR(-EIO);
 		else
-			atomic_inc(&ctx->ref);
+			refcount_inc(&ctx->ref);
 	}
 	xa_unlock(&ctx_table);
 	return ctx;
@@ -160,7 +161,7 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
 
 static void ucma_put_ctx(struct ucma_context *ctx)
 {
-	if (atomic_dec_and_test(&ctx->ref))
+	if (refcount_dec_and_test(&ctx->ref))
 		complete(&ctx->comp);
 }
 
@@ -212,10 +213,11 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
 		return NULL;
 
 	INIT_WORK(&ctx->close_work, ucma_close_id);
-	atomic_set(&ctx->ref, 1);
+	refcount_set(&ctx->ref, 1);
 	init_completion(&ctx->comp);
 	INIT_LIST_HEAD(&ctx->mc_list);
 	ctx->file = file;
+	mutex_init(&ctx->mutex);
 
 	if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
 		goto error;
@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 	}
 
 	events_reported = ctx->events_reported;
+	mutex_destroy(&ctx->mutex);
 	kfree(ctx);
 	return events_reported;
 }
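Taken together, the ucma_context hunks above establish the lifecycle the rest of this file's changes rely on: a refcount paired with a completion for teardown, plus a per-context mutex that the hunks below wrap around every rdma_* call. A condensed sketch of the flow (abbreviated from the surrounding hunks; ucma_get_ctx() actually takes the reference under the xarray lock):

	/* Allocation: one reference owned by the creator. */
	refcount_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	mutex_init(&ctx->mutex);

	/* Each system call: hold a reference, serialize on ctx->mutex. */
	refcount_inc(&ctx->ref);
	mutex_lock(&ctx->mutex);
	/* ... rdma_bind_addr() / rdma_connect() / ... */
	mutex_unlock(&ctx->mutex);
	if (refcount_dec_and_test(&ctx->ref))	/* ucma_put_ctx() */
		complete(&ctx->comp);

	/* Teardown: drop the creator's reference and wait out all users. */
	if (refcount_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
	wait_for_completion(&ctx->comp);
	mutex_destroy(&ctx->mutex);
	kfree(ctx);

This is the "aggressively lock the RDMA CM code" work from the summary: user calls on the same context are now serialized instead of racing into the rdma_cm state machine.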
@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	mutex_unlock(&ctx->mutex);
+
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	memset(&resp, 0, sizeof resp);
 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
 
 out:
+	mutex_unlock(&ctx->mutex);
 	if (copy_to_user(u64_to_user_ptr(cmd.response),
 			 &resp, sizeof(resp)))
 		ret = -EFAULT;
@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	switch (cmd.option) {
 	case RDMA_USER_CM_QUERY_ADDR:
 		ret = ucma_query_addr(ctx, response, out_len);
@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
 		ret = -ENOSYS;
 		break;
 	}
+	mutex_unlock(&ctx->mutex);
 
 	ucma_put_ctx(ctx);
 	return ret;
@@ -1045,7 +1063,7 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
 	dst->retry_count = src->retry_count;
 	dst->rnr_retry_count = src->rnr_retry_count;
 	dst->srq = src->srq;
-	dst->qp_num = src->qp_num;
+	dst->qp_num = src->qp_num & 0xFFFFFF;
 	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
 }
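The qp_num line above is a hardening detail: InfiniBand QP numbers are 24 bits on the wire, so the mask strips whatever user space left in the top byte before the value reaches the CM. Illustrative values (made up):

	u32 user_qpn = 0x01abcdef;		/* stray high byte from user space */
	u32 wire_qpn = user_qpn & 0xFFFFFF;	/* 0x00abcdef, the real 24-bit QPN */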
@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 		return PTR_ERR(ctx);
 
 	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_connect(ctx->cm_id, &conn_param);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
 	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
 		       cmd.backlog : max_backlog;
+	mutex_lock(&ctx->mutex);
 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 	if (cmd.conn_param.valid) {
 		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
 		mutex_lock(&file->mut);
+		mutex_lock(&ctx->mutex);
 		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+		mutex_unlock(&ctx->mutex);
 		if (!ret)
 			ctx->uid = cmd.uid;
 		mutex_unlock(&file->mut);
-	} else
+	} else {
+		mutex_lock(&ctx->mutex);
 		ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+		mutex_unlock(&ctx->mutex);
+	}
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	ret = rdma_disconnect(ctx->cm_id);
+	mutex_unlock(&ctx->mutex);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	resp.qp_attr_mask = 0;
 	memset(&qp_attr, 0, sizeof qp_attr);
 	qp_attr.qp_state = cmd.qp_state;
+	mutex_lock(&ctx->mutex);
 	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+	mutex_unlock(&ctx->mutex);
 	if (ret)
 		goto out;
@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
 		struct sa_path_rec opa;
 
 		sa_convert_path_ib_to_opa(&opa, &sa_path);
+		mutex_lock(&ctx->mutex);
 		ret = rdma_set_ib_path(ctx->cm_id, &opa);
+		mutex_unlock(&ctx->mutex);
 	} else {
+		mutex_lock(&ctx->mutex);
 		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+		mutex_unlock(&ctx->mutex);
 	}
 	if (ret)
 		return ret;
@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
 	switch (level) {
 	case RDMA_OPTION_ID:
+		mutex_lock(&ctx->mutex);
 		ret = ucma_set_option_id(ctx, optname, optval, optlen);
+		mutex_unlock(&ctx->mutex);
 		break;
 	case RDMA_OPTION_IB:
 		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	mutex_lock(&ctx->mutex);
 	if (ctx->cm_id->device)
 		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+	mutex_unlock(&ctx->mutex);
 
 	ucma_put_ctx(ctx);
 	return ret;
@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 	mc->join_state = join_state;
 	mc->uid = cmd->uid;
 	memcpy(&mc->addr, addr, cmd->addr_size);
+	mutex_lock(&ctx->mutex);
 	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
 				  join_state, mc);
+	mutex_unlock(&ctx->mutex);
 	if (ret)
 		goto err2;
@@ -1502,7 +1544,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		mc = ERR_PTR(-ENOENT);
 	else if (mc->ctx->file != file)
 		mc = ERR_PTR(-EINVAL);
-	else if (!atomic_inc_not_zero(&mc->ctx->ref))
+	else if (!refcount_inc_not_zero(&mc->ctx->ref))
 		mc = ERR_PTR(-ENXIO);
 	else
 		__xa_erase(&multicast_table, mc->id);
@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}
 
+	mutex_lock(&mc->ctx->mutex);
 	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
+	mutex_unlock(&mc->ctx->mutex);
+
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);
......
@@ -197,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
+	unsigned long dma_attr = 0;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
@@ -278,10 +279,12 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 
 	sg_mark_end(sg);
 
-	umem->nmap = ib_dma_map_sg(device,
-				   umem->sg_head.sgl,
-				   umem->sg_nents,
-				   DMA_BIDIRECTIONAL);
+	if (access & IB_ACCESS_RELAXED_ORDERING)
+		dma_attr |= DMA_ATTR_WEAK_ORDERING;
+
+	umem->nmap =
+		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
+				    DMA_BIDIRECTIONAL, dma_attr);
 
 	if (!umem->nmap) {
 		ret = -ENOMEM;
......
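The umem hunk above threads IB_ACCESS_RELAXED_ORDERING through to the DMA API as DMA_ATTR_WEAK_ORDERING, letting platforms that honour the attribute use PCIe relaxed-ordered transactions for registered memory. A hedged sketch of how a caller might opt in when pinning user pages (the other access flags are whatever the MR actually needs):

	struct ib_umem *umem;

	/* Request weakly-ordered DMA mappings where the platform allows it. */
	umem = ib_umem_get(device, start, length,
			   IB_ACCESS_LOCAL_WRITE | IB_ACCESS_RELAXED_ORDERING);
	if (IS_ERR(umem))
		return PTR_ERR(umem);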
@@ -54,8 +54,6 @@
 #include "core_priv.h"
 
 #include <trace/events/rdma_core.h>
-#include <trace/events/rdma_core.h>
-
 static int ib_resolve_eth_dmac(struct ib_device *device,
 			       struct rdma_ah_attr *ah_attr);
@@ -1127,8 +1125,7 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 EXPORT_SYMBOL(ib_open_qp);
 
 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
-					struct ib_qp_init_attr *qp_init_attr,
-					struct ib_udata *udata)
+					struct ib_qp_init_attr *qp_init_attr)
 {
 	struct ib_qp *real_qp = qp;
@@ -1150,9 +1147,18 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
 	return qp;
 }
 
-struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
-				struct ib_qp_init_attr *qp_init_attr,
-				struct ib_udata *udata)
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specified protection
+ *   domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ *   QP.  If QP creation succeeds, then the attributes are updated to
+ *   the actual capabilities of the created QP.
+ *
+ * NOTE: for user qp use ib_create_qp_user with valid udata!
+ */
+struct ib_qp *ib_create_qp(struct ib_pd *pd,
+			   struct ib_qp_init_attr *qp_init_attr)
 {
 	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
 	struct ib_qp *qp;
@@ -1187,7 +1193,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
 		struct ib_qp *xrc_qp =
-			create_xrc_qp_user(qp, qp_init_attr, udata);
+			create_xrc_qp_user(qp, qp_init_attr);
 
 		if (IS_ERR(xrc_qp)) {
 			ret = PTR_ERR(xrc_qp);
@@ -1243,7 +1249,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 
 	return ERR_PTR(ret);
 }
-EXPORT_SYMBOL(ib_create_qp_user);
+EXPORT_SYMBOL(ib_create_qp);
 
 static const struct {
 	int			valid;
......
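The verbs.c hunks above finish separating the kernel and user QP-creation paths: ib_create_qp() is now a real exported function for kernel callers (no udata), while ib_create_qp_user() remains the uverbs entry point. A hedged sketch of a kernel ULP caller, assuming pd, send_cq and recv_cq were set up earlier:

	struct ib_qp_init_attr attr = {
		.qp_type      = IB_QPT_RC,
		.send_cq      = send_cq,
		.recv_cq      = recv_cq,
		.sq_sig_type  = IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	struct ib_qp *qp;

	qp = ib_create_qp(pd, &attr);	/* kernel QPs only; no udata */
	if (IS_ERR(qp))
		return PTR_ERR(qp);
	/* attr.cap now reflects what the device actually granted */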
@@ -89,6 +89,15 @@
 
 #define BNXT_RE_DEFAULT_ACK_DELAY	16
 
+struct bnxt_re_ring_attr {
+	dma_addr_t	*dma_arr;
+	int		pages;
+	int		type;
+	u32		depth;
+	u32		lrid; /* Logical ring id */
+	u8		mode;
+};
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
@@ -104,6 +113,14 @@ struct bnxt_re_sqp_entries {
 	struct bnxt_re_qp	*qp1_qp;
 };
 
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES	1024
+struct bnxt_re_gsi_context {
+	struct bnxt_re_qp		*gsi_qp;
+	struct bnxt_re_qp		*gsi_sqp;
+	struct bnxt_re_ah		*gsi_sah;
+	struct bnxt_re_sqp_entries	*sqp_tbl;
+};
+
 #define BNXT_RE_MIN_MSIX		2
 #define BNXT_RE_MAX_MSIX		9
 #define BNXT_RE_AEQ_IDX			0
@@ -115,7 +132,6 @@ struct bnxt_re_dev {
 	struct list_head		list;
 	unsigned long			flags;
 #define BNXT_RE_FLAG_NETDEV_REGISTERED		0
-#define BNXT_RE_FLAG_IBDEV_REGISTERED		1
 #define BNXT_RE_FLAG_GOT_MSIX			2
 #define BNXT_RE_FLAG_HAVE_L2_REF		3
 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN		4
@@ -125,7 +141,7 @@ struct bnxt_re_dev {
 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS		29
 	struct net_device		*netdev;
 	unsigned int			version, major, minor;
-	struct bnxt_qplib_chip_ctx	chip_ctx;
+	struct bnxt_qplib_chip_ctx	*chip_ctx;
 	struct bnxt_en_dev		*en_dev;
 	struct bnxt_msix_entry		msix_entries[BNXT_RE_MAX_MSIX];
 	int				num_msix;
@@ -160,15 +176,11 @@ struct bnxt_re_dev {
 	atomic_t			srq_count;
 	atomic_t			mr_count;
 	atomic_t			mw_count;
-	atomic_t			sched_count;
 	/* Max of 2 lossless traffic class supported per port */
 	u16				cosq[2];
 	/* QP for for handling QP1 packets */
-	u32				sqp_id;
-	struct bnxt_re_qp		*qp1_sqp;
-	struct bnxt_re_ah		*sqp_ah;
-	struct bnxt_re_sqp_entries	sqp_tbl[1024];
+	struct bnxt_re_gsi_context	gsi_ctx;
 	atomic_t			nq_alloc_cnt;
 	u32				is_virtfn;
 	u32				num_vfs;
......
This diff is collapsed.
@@ -42,7 +42,7 @@
 struct bnxt_qplib_srq {
 	struct bnxt_qplib_pd		*pd;
 	struct bnxt_qplib_dpi		*dpi;
-	void __iomem			*dbr_base;
+	struct bnxt_qplib_db_info	dbinfo;
 	u64				srq_handle;
 	u32				id;
 	u32				max_wqe;
@@ -236,6 +236,7 @@ struct bnxt_qplib_swqe {
 struct bnxt_qplib_q {
 	struct bnxt_qplib_hwq		hwq;
 	struct bnxt_qplib_swq		*swq;
+	struct bnxt_qplib_db_info	dbinfo;
 	struct bnxt_qplib_sg_info	sg_info;
 	u32				max_wqe;
 	u16				q_full_delta;
@@ -370,7 +371,7 @@ struct bnxt_qplib_cqe {
 #define BNXT_QPLIB_QUEUE_START_PERIOD		0x01
 struct bnxt_qplib_cq {
 	struct bnxt_qplib_dpi		*dpi;
-	void __iomem			*dbr_base;
+	struct bnxt_qplib_db_info	dbinfo;
 	u32				max_wqe;
 	u32				id;
 	u16				count;
@@ -401,6 +402,7 @@ struct bnxt_qplib_cq {
 	 * of the same QP while manipulating the flush list.
 	 */
 	spinlock_t			flush_lock; /* QP flush management */
+	u16				cnq_events;
 };
 
 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
@@ -433,66 +435,32 @@
 				 NQ_DB_IDX_VALID |	\
 				 NQ_DB_IRQ_DIS)
 
-static inline void bnxt_qplib_ring_nq_db64(void __iomem *db, u32 index,
-					   u32 xid, bool arm)
-{
-	u64 val;
-
-	val = xid & DBC_DBC_XID_MASK;
-	val |= DBC_DBC_PATH_ROCE;
-	val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
-	val <<= 32;
-	val |= index & DBC_DBC_INDEX_MASK;
-	writeq(val, db);
-}
-
-static inline void bnxt_qplib_ring_nq_db_rearm(void __iomem *db, u32 raw_cons,
-					       u32 max_elements, u32 xid,
-					       bool gen_p5)
-{
-	u32 index = raw_cons & (max_elements - 1);
-
-	if (gen_p5)
-		bnxt_qplib_ring_nq_db64(db, index, xid, true);
-	else
-		writel(NQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK), db);
-}
-
-static inline void bnxt_qplib_ring_nq_db(void __iomem *db, u32 raw_cons,
-					 u32 max_elements, u32 xid,
-					 bool gen_p5)
-{
-	u32 index = raw_cons & (max_elements - 1);
-
-	if (gen_p5)
-		bnxt_qplib_ring_nq_db64(db, index, xid, false);
-	else
-		writel(NQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK), db);
-}
+struct bnxt_qplib_nq_db {
+	struct bnxt_qplib_reg_desc	reg;
+	struct bnxt_qplib_db_info	dbinfo;
+};
+
+typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
+			     struct bnxt_qplib_cq *cq);
+typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+			      struct bnxt_qplib_srq *srq, u8 event);
 
 struct bnxt_qplib_nq {
-	struct pci_dev		*pdev;
-	struct bnxt_qplib_res	*res;
-
-	int			vector;
-	cpumask_t		mask;
-	int			budget;
-	bool			requested;
-	struct tasklet_struct	worker;
-	struct bnxt_qplib_hwq	hwq;
-
-	u16			bar_reg;
-	u32			bar_reg_off;
-	u16			ring_id;
-	void __iomem		*bar_reg_iomem;
-
-	int			(*cqn_handler)(struct bnxt_qplib_nq *nq,
-					       struct bnxt_qplib_cq *cq);
-	int			(*srqn_handler)(struct bnxt_qplib_nq *nq,
-						struct bnxt_qplib_srq *srq,
-						u8 event);
-	struct workqueue_struct	*cqn_wq;
-	char			name[32];
+	struct pci_dev			*pdev;
+	struct bnxt_qplib_res		*res;
+	char				name[32];
+	struct bnxt_qplib_hwq		hwq;
+	struct bnxt_qplib_nq_db		nq_db;
+	u16				ring_id;
+	int				msix_vec;
+	cpumask_t			mask;
+	struct tasklet_struct		nq_tasklet;
+	bool				requested;
+	int				budget;
+
+	cqn_handler_t			cqn_handler;
+	srqn_handler_t			srqn_handler;
+	struct workqueue_struct		*cqn_wq;
 };
 
 struct bnxt_qplib_nq_work {
@@ -507,11 +475,8 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
 			    int msix_vector, bool need_init);
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
-			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
-					    struct bnxt_qplib_cq *cq),
-			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
-					     struct bnxt_qplib_srq *srq,
-					     u8 event));
+			 cqn_handler_t cqn_handler,
+			 srqn_handler_t srq_handler);
 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 			  struct bnxt_qplib_srq *srq);
 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
@@ -550,7 +515,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
-int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
+int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
 				 unsigned long *flags);
......
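One readability point in the hunk above: the cqn_handler_t and srqn_handler_t typedefs replace function-pointer types that previously had to be spelled out in full both in struct bnxt_qplib_nq and in the bnxt_qplib_enable_nq() prototype. The generic before/after:

	/* before: the full pointer type repeats at every use site */
	int (*cqn_handler)(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq);

	/* after: declared once, fields and parameters stay one line each */
	typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
				     struct bnxt_qplib_cq *cq);
	cqn_handler_t cqn_handler;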
This diff is collapsed.
@@ -206,8 +206,9 @@ static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
 
 #define CREQ_ENTRY_POLL_BUDGET		0x100
 
 /* HWQ */
+typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
 
-struct bnxt_qplib_crsq {
+struct bnxt_qplib_crsqe {
 	struct creq_qp_event	*resp;
 	u32			req_size;
 };
@@ -225,41 +226,53 @@ struct bnxt_qplib_qp_node {
 
 #define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
 
+#define FIRMWARE_INITIALIZED_FLAG	(0)
+#define FIRMWARE_FIRST_FLAG		(31)
+#define FIRMWARE_TIMED_OUT		(3)
+
+struct bnxt_qplib_cmdq_mbox {
+	struct bnxt_qplib_reg_desc	reg;
+	void __iomem			*prod;
+	void __iomem			*db;
+};
+
+struct bnxt_qplib_cmdq_ctx {
+	struct bnxt_qplib_hwq		hwq;
+	struct bnxt_qplib_cmdq_mbox	cmdq_mbox;
+	wait_queue_head_t		waitq;
+	unsigned long			flags;
+	unsigned long			*cmdq_bitmap;
+	u32				bmap_size;
+	u32				seq_num;
+};
+
+struct bnxt_qplib_creq_db {
+	struct bnxt_qplib_reg_desc	reg;
+	struct bnxt_qplib_db_info	dbinfo;
+};
+
+struct bnxt_qplib_creq_stat {
+	u64	creq_qp_event_processed;
+	u64	creq_func_event_processed;
+};
+
+struct bnxt_qplib_creq_ctx {
+	struct bnxt_qplib_hwq		hwq;
+	struct bnxt_qplib_creq_db	creq_db;
+	struct bnxt_qplib_creq_stat	stats;
+	struct tasklet_struct		creq_tasklet;
+	aeq_handler_t			aeq_handler;
+	u16				ring_id;
+	int				msix_vec;
+	bool				requested; /* irq handler installed */
+};
+
 /* RCFW Communication Channels */
 struct bnxt_qplib_rcfw {
 	struct pci_dev		*pdev;
 	struct bnxt_qplib_res	*res;
-	int			vector;
-	struct tasklet_struct	worker;
-	bool			requested;
-	unsigned long		*cmdq_bitmap;
-	u32			bmap_size;
-	unsigned long		flags;
-#define FIRMWARE_INITIALIZED_FLAG	0
-#define FIRMWARE_FIRST_FLAG		31
-#define FIRMWARE_TIMED_OUT		3
-	wait_queue_head_t	waitq;
-	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
-					       void *, void *);
-	u32			seq_num;
-
-	/* Bar region info */
-	void __iomem		*cmdq_bar_reg_iomem;
-	u16			cmdq_bar_reg;
-	u16			cmdq_bar_reg_prod_off;
-	u16			cmdq_bar_reg_trig_off;
-	u16			creq_ring_id;
-	u16			creq_bar_reg;
-	void __iomem		*creq_bar_reg_iomem;
-
-	/* Cmd-Resp and Async Event notification queue */
-	struct bnxt_qplib_hwq	creq;
-	u64			creq_qp_event_processed;
-	u64			creq_func_event_processed;
-
-	/* Actual Cmd and Resp Queues */
-	struct bnxt_qplib_hwq	cmdq;
-	struct bnxt_qplib_crsq	*crsqe_tbl;
+	struct bnxt_qplib_cmdq_ctx	cmdq;
+	struct bnxt_qplib_creq_ctx	creq;
+	struct bnxt_qplib_crsqe		*crsqe_tbl;
 	int qp_tbl_size;
 	struct bnxt_qplib_qp_node *qp_tbl;
 	u64 oos_prev;
@@ -268,7 +281,7 @@ struct bnxt_qplib_rcfw {
 };
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
-int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 				  struct bnxt_qplib_rcfw *rcfw,
 				  struct bnxt_qplib_ctx *ctx,
 				  int qp_tbl_sz);
@@ -276,12 +289,10 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
 			      bool need_init);
-int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
-				   struct bnxt_qplib_rcfw *rcfw,
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,
 				   int cp_bar_reg_off, int virt_fn,
-				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
-						      void *aeqe, void *obj));
+				   aeq_handler_t aeq_handler);
 struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
 				struct bnxt_qplib_rcfw *rcfw,
......
This diff is collapsed.
@@ -707,7 +707,7 @@ struct mpa_message {
 	u8 flags;
 	u8 revision;
 	__be16 private_data_size;
-	u8 private_data[0];
+	u8 private_data[];
 };
 
 struct mpa_v2_conn_params {
@@ -719,7 +719,7 @@ struct terminate_message {
 	u8 layer_etype;
 	u8 ecode;
 	__be16 hdrct_rsvd;
-	u8 len_hdrs[0];
+	u8 len_hdrs[];
 };
 
 #define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
......
@@ -2127,7 +2127,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 
 	pr_debug("ib_pd %p\n", pd);
 	if (attrs->qp_type != IB_QPT_RC)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
......
@@ -123,7 +123,7 @@ struct fw_ri_dsgl {
 	__be32 len0;
 	__be64 addr0;
 #ifndef C99_NOT_SUPPORTED
-	struct fw_ri_dsge_pair sge[0];
+	struct fw_ri_dsge_pair sge[];
 #endif
 };
 
@@ -139,7 +139,7 @@ struct fw_ri_isgl {
 	__be16 nsge;
 	__be32 r2;
 #ifndef C99_NOT_SUPPORTED
-	struct fw_ri_sge sge[0];
+	struct fw_ri_sge sge[];
 #endif
 };
 
@@ -149,7 +149,7 @@ struct fw_ri_immd {
 	__be16 r2;
 	__be32 immdlen;
 #ifndef C99_NOT_SUPPORTED
-	__u8 data[0];
+	__u8 data[];
 #endif
 };
 
@@ -321,7 +321,7 @@ struct fw_ri_res_wr {
 	__be32 len16_pkd;
 	__u64 cookie;
 #ifndef C99_NOT_SUPPORTED
-	struct fw_ri_res res[0];
+	struct fw_ri_res res[];
 #endif
 };
......
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
 /*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #ifndef _EFA_ADMIN_CMDS_H_
@@ -801,21 +801,16 @@ struct efa_admin_mmio_req_read_less_resp {
 
 /* create_qp_cmd */
 #define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
-#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_SHIFT               1
 #define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)
 
 /* reg_mr_cmd */
 #define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
-#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT     7
 #define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
 #define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
-#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT       2
 #define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)
 
 /* create_cq_cmd */
-#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
 #define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
-#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_SHIFT                  6
 #define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
 #define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)
......
This diff is collapsed.