Commit 303851e1 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Not much exciting here, almost entirely syzkaller fixes.

  This is going to be an ongoing theme for some time, I think. Both
  Google and Mellanox are now running syzkaller on different parts of
  the user API.

  Summary:

   - Many bug fixes related to syzkaller from Leon Romanovsky. These are
     still for the mlx5 driver and ucma interface.

   - Fix a situation with port reuse for iWARP, discovered during
     scale-up testing

   - Bug fixes for the profile and restrack patches accepted during this
     merge window

   - Compile warning cleanups from Arnd; this is apparently the last
     warning needed to make 32-bit builds quiet"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/ucma: Ensure that CM_ID exists prior to access it
  RDMA/verbs: Remove restrack entry from XRCD structure
  RDMA/ucma: Fix use-after-free access in ucma_close
  RDMA/ucma: Check AF family prior resolving address
  infiniband: bnxt_re: use BIT_ULL() for 64-bit bit masks
  infiniband: qplib_fp: fix pointer cast
  IB/mlx5: Fix cleanup order on unload
  RDMA/ucma: Don't allow join attempts for unsupported AF family
  RDMA/ucma: Fix access to non-initialized CM_ID object
  RDMA/core: Do not use invalid destination in determining port reuse
  RDMA/mlx5: Fix crash while accessing garbage pointer and freed memory
  IB/mlx5: Fix integer overflows in mlx5_ib_create_srq
  IB/mlx5: Fix out-of-bounds read in create_raw_packet_qp_rq
parents 76c0b6a3 e8980d67
drivers/infiniband/core/cma.c
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 			continue;
 
 		/* different dest port -> unique */
-		if (!cma_any_port(cur_daddr) &&
+		if (!cma_any_port(daddr) &&
+		    !cma_any_port(cur_daddr) &&
 		    (dport != cur_dport))
 			continue;
 
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 			continue;
 
 		/* different dst address -> unique */
-		if (!cma_any_addr(cur_daddr) &&
+		if (!cma_any_addr(daddr) &&
+		    !cma_any_addr(cur_daddr) &&
 		    cma_addr_cmp(daddr, cur_daddr))
 			continue;
 
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		}
 #endif
 	}
+	daddr = cma_dst_addr(id_priv);
+	daddr->sa_family = addr->sa_family;
+
 	ret = cma_get_port(id_priv);
 	if (ret)
 		goto err2;
 
-	daddr = cma_dst_addr(id_priv);
-	daddr->sa_family = addr->sa_family;
-
 	return 0;
 err2:
 	if (id_priv->cma_dev)
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	struct cma_multicast *mc;
 	int ret;
 
+	if (!id->device)
+		return -EINVAL;
+
 	id_priv = container_of(id, struct rdma_id_private, id);
 	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
 	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
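The port-reuse hunks are easier to read outside diff form. A minimal sketch (plain C, not the kernel code) of the corrected uniqueness test: a destination only proves uniqueness when *both* the candidate's and the existing binding's destinations are fully specified, which is exactly what the added !cma_any_port(daddr) / !cma_any_addr(daddr) terms enforce.

#include <stdbool.h>

/* Sketch of the fixed check in cma_port_is_unique(): before the fix,
 * only the existing binding's destination (cur_*) was tested, so a
 * still-wildcard destination on the new id could wrongly count as a
 * "different destination" and allow port reuse. */
static bool dest_proves_unique(bool daddr_is_any, bool cur_daddr_is_any,
			       unsigned short dport, unsigned short cur_dport)
{
	return !daddr_is_any &&      /* new id's destination must be set */
	       !cur_daddr_is_any &&  /* existing destination must be set */
	       dport != cur_dport;   /* and they must actually differ */
}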
drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
 	ctx = idr_find(&ctx_idr, id);
 	if (!ctx)
 		ctx = ERR_PTR(-ENOENT);
-	else if (ctx->file != file)
+	else if (ctx->file != file || !ctx->cm_id)
 		ctx = ERR_PTR(-EINVAL);
 	return ctx;
 }
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 	struct rdma_ucm_create_id cmd;
 	struct rdma_ucm_create_id_resp resp;
 	struct ucma_context *ctx;
+	struct rdma_cm_id *cm_id;
 	enum ib_qp_type qp_type;
 	int ret;
 
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 		return -ENOMEM;
 
 	ctx->uid = cmd.uid;
-	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
-				    ucma_event_handler, ctx, cmd.ps, qp_type);
-	if (IS_ERR(ctx->cm_id)) {
-		ret = PTR_ERR(ctx->cm_id);
+	cm_id = rdma_create_id(current->nsproxy->net_ns,
+			       ucma_event_handler, ctx, cmd.ps, qp_type);
+	if (IS_ERR(cm_id)) {
+		ret = PTR_ERR(cm_id);
 		goto err1;
 	}
 
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
 		ret = -EFAULT;
 		goto err2;
 	}
 
+	ctx->cm_id = cm_id;
 	return 0;
 
 err2:
-	rdma_destroy_id(ctx->cm_id);
+	rdma_destroy_id(cm_id);
 err1:
 	mutex_lock(&mut);
 	idr_remove(&ctx_idr, ctx->id);
 	mutex_unlock(&mut);
+	mutex_lock(&file->mut);
+	list_del(&ctx->list);
+	mutex_unlock(&file->mut);
 	kfree(ctx);
 	return ret;
 }
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
 			       int in_len, int out_len)
 {
 	struct rdma_ucm_resolve_ip cmd;
+	struct sockaddr *src, *dst;
 	struct ucma_context *ctx;
 	int ret;
 
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	src = (struct sockaddr *) &cmd.src_addr;
+	dst = (struct sockaddr *) &cmd.dst_addr;
+	if (!rdma_addr_size(src) || !rdma_addr_size(dst))
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
-				(struct sockaddr *) &cmd.dst_addr,
-				cmd.timeout_ms);
+	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
 	ucma_put_ctx(ctx);
 	return ret;
 }
@@ -1349,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 		return -ENOSPC;
 
 	addr = (struct sockaddr *) &cmd->addr;
-	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+	if (cmd->addr_size != rdma_addr_size(addr))
 		return -EINVAL;
 
 	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1417,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
 	join_cmd.uid = cmd.uid;
 	join_cmd.id = cmd.id;
 	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
+	if (!join_cmd.addr_size)
+		return -EINVAL;
+
 	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
 	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
 
@@ -1432,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
+		return -EINVAL;
+
 	return ucma_process_join(file, &cmd, out_len);
 }
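Two patterns recur in these ucma hunks: user-supplied sockaddr sizes are validated with rdma_addr_size() before any use, and ucma_create_id() now keeps the new cm_id in a local variable, storing it into ctx->cm_id only after the last failure point. A minimal user-space sketch (hypothetical names, a plain array standing in for the kernel's IDR) of why _ucma_find_context() must also reject a NULL cm_id:

#include <stddef.h>

struct ctx {
	void *cm_id;               /* stays NULL until creation succeeds */
};

static struct ctx *table[64];      /* stand-in for the kernel's IDR */

/* The context is visible in the table before it is fully built, so the
 * lookup treats an unpublished cm_id as "no such context" (the kernel
 * returns ERR_PTR(-EINVAL) here). */
static struct ctx *find_context(int id)
{
	struct ctx *c = table[id];

	if (!c || !c->cm_id)
		return NULL;
	return c;
}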
drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -57,8 +57,8 @@
 #define BNXT_RE_PAGE_SIZE_8M		BIT(BNXT_RE_PAGE_SHIFT_8M)
 #define BNXT_RE_PAGE_SIZE_1G		BIT(BNXT_RE_PAGE_SHIFT_1G)
 
-#define BNXT_RE_MAX_MR_SIZE_LOW		BIT(BNXT_RE_PAGE_SHIFT_1G)
-#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT(39)
+#define BNXT_RE_MAX_MR_SIZE_LOW		BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT_ULL(39)
 #define BNXT_RE_MAX_MR_SIZE		BNXT_RE_MAX_MR_SIZE_HIGH
 
 #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024)
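The BIT_ULL() swap matters on 32-bit targets, where the kernel's BIT() expands to a shift of 1UL and unsigned long is only 32 bits wide, so BIT(39) is undefined and triggers the compile warning mentioned in the pull message. A standalone illustration (macros renamed to avoid clashing with kernel headers):

#include <stdint.h>

#define MY_BIT(nr)     (1UL  << (nr))   /* mirrors the kernel's BIT() */
#define MY_BIT_ULL(nr) (1ULL << (nr))   /* mirrors the kernel's BIT_ULL() */

/* On a 32-bit build, MY_BIT(39) shifts a 32-bit value by 39 bits:
 * undefined behavior plus a compiler warning.  The ULL variant is
 * 64 bits wide on every target, so bit 39 is always representable. */
static const uint64_t max_mr_size = MY_BIT_ULL(39);    /* 512 GiB */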
drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3598,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	int umem_pgs, page_shift, rc;
 
 	if (length > BNXT_RE_MAX_MR_SIZE) {
-		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
 			length, BNXT_RE_MAX_MR_SIZE);
 		return ERR_PTR(-ENOMEM);
 	}
drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -243,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
 	u32 sw_cons, raw_cons;
 	u16 type;
 	int budget = nq->budget;
-	u64 q_handle;
+	uintptr_t q_handle;
 
 	/* Service the NQ until empty */
 	raw_cons = hwq->cons;
@@ -526,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 	/* Configure the request */
 	req.dpi = cpu_to_le32(srq->dpi->dpi);
-	req.srq_handle = cpu_to_le64(srq);
+	req.srq_handle = cpu_to_le64((uintptr_t)srq);
 
 	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
 	pbl = &srq->hwq.pbl[PBL_LVL_0];
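The qplib_fp change is the classic pointer-to-u64 fix: converting a pointer straight to a 64-bit integer warns on 32-bit builds because the sizes differ. Routing the cast through uintptr_t, an integer type exactly as wide as a pointer, is well defined everywhere. A minimal sketch:

#include <stdint.h>

/* Widen a pointer into a 64-bit device handle without a cast warning:
 * pointer -> uintptr_t is always size-matched, and uintptr_t ->
 * uint64_t is an ordinary integer widening. */
static uint64_t handle_from_ptr(const void *p)
{
	return (uint64_t)(uintptr_t)p;
}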
drivers/infiniband/hw/mlx5/main.c
@@ -4860,19 +4860,19 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 	return ib_register_device(&dev->ib_dev, NULL);
 }
 
-static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
-	ib_unregister_device(&dev->ib_dev);
+	destroy_umrc_res(dev);
 }
 
-static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
-	return create_umr_res(dev);
+	ib_unregister_device(&dev->ib_dev);
 }
 
-static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
-	destroy_umrc_res(dev);
+	return create_umr_res(dev);
 }
 
 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -4982,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
 		     mlx5_ib_stage_delay_drop_init,
 		     mlx5_ib_stage_delay_drop_cleanup),
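The profile table runs stage init hooks front to back and cleanup hooks back to front, and a stage may supply only one of the two hooks. Splitting UMR handling into a PRE stage (cleanup only) placed before IB_REG, and a POST stage (init only) placed after it, therefore creates the UMR resources after device registration and, per the "Fix cleanup order on unload" commit, destroys them only after ib_unregister_device() on teardown. A hedged sketch of the driving loops (hypothetical names, not the mlx5 code):

struct stage {
	int  (*init)(void);        /* may be NULL, as in the PRE stage */
	void (*cleanup)(void);     /* may be NULL, as in the POST stage */
};

/* init front to back; on failure, unwind the stages already brought up */
static int profile_init(const struct stage *s, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (s[i].init && s[i].init())
			goto unwind;
	return 0;
unwind:
	while (--i >= 0)
		if (s[i].cleanup)
			s[i].cleanup();
	return -1;
}

/* teardown back to front: IB_REG's cleanup (unregister) runs before
 * PRE_IB_REG_UMR's cleanup (destroy UMR resources) */
static void profile_cleanup(const struct stage *s, int n)
{
	while (--n >= 0)
		if (s[n].cleanup)
			s[n].cleanup();
}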
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -739,8 +739,9 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CONG_DEBUGFS,
 	MLX5_IB_STAGE_UAR,
 	MLX5_IB_STAGE_BFREG,
+	MLX5_IB_STAGE_PRE_IB_REG_UMR,
 	MLX5_IB_STAGE_IB_REG,
-	MLX5_IB_STAGE_UMR_RESOURCES,
+	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
 	MLX5_IB_STAGE_MAX,
drivers/infiniband/hw/mlx5/mr.c
@@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	*umem = ib_umem_get(pd->uobject->context, start, length,
 			    access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
-	if (err < 0) {
+	if (err) {
+		*umem = NULL;
 		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
@@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		if (err) {
 			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
 			ib_umem_release(mr->umem);
+			mr->umem = NULL;
 			clean_mr(dev, mr);
 			return err;
 		}
@@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		u32 key = mr->mmkey.key;
 
 		err = destroy_mkey(dev, mr);
-		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 				     key, err);
 			return err;
 		}
-	} else {
-		mlx5_mr_cache_free(dev, mr);
 	}
 
 	return 0;
@@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 	}
 
+	if (!mr->allocated_from_cache)
+		kfree(mr);
+	else
+		mlx5_mr_cache_free(dev, mr);
+
 	return 0;
 }
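All four mr.c hunks implement the same defense: once an embedded object has been released, clear the dangling pointer, and free the containing struct only after its last use (kfree(mr) moves out of clean_mr() into the end of dereg_mr()). A small sketch of the pointer-clearing half (hypothetical names):

#include <stdlib.h>

struct mr {
	void *umem;                /* user-memory mapping, may be absent */
};

/* Error path: release the member and clear it, so a later generic
 * cleanup pass cannot release it again (the diff adds the equivalent
 * mr->umem = NULL after ib_umem_release()). */
static void release_umem(struct mr *mr)
{
	free(mr->umem);
	mr->umem = NULL;
}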
drivers/infiniband/hw/mlx5/qp.c
@@ -1161,7 +1161,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	ib_umem_release(sq->ubuffer.umem);
 }
 
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
 {
 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1177,7 +1177,8 @@ static int get_rq_pas_size(void *qpc)
 }
 
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
-				   struct mlx5_ib_rq *rq, void *qpin)
+				   struct mlx5_ib_rq *rq, void *qpin,
+				   size_t qpinlen)
 {
 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
@@ -1186,9 +1187,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	void *rqc;
 	void *wq;
 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
-	int inlen;
+	size_t rq_pas_size = get_rq_pas_size(qpc);
+	size_t inlen;
 	int err;
-	u32 rq_pas_size = get_rq_pas_size(qpc);
+
+	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+		return -EINVAL;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1277,7 +1281,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u32 *in,
+				u32 *in, size_t inlen,
 				struct ib_pd *pd)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1309,7 +1313,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
 		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-		err = create_raw_packet_qp_rq(dev, rq, in);
+		err = create_raw_packet_qp_rq(dev, rq, in, inlen);
 		if (err)
 			goto err_destroy_sq;
 
@@ -1872,11 +1876,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
+	if (inlen < 0) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
-		err = create_raw_packet_qp(dev, qp, in, pd);
+		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
 	} else {
 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
 	}
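The qp.c change threads the command-buffer length (qpinlen) down to create_raw_packet_qp_rq(), which previously trusted get_rq_pas_size() and copied that many PAS bytes out of the buffer regardless of how short it really was. The added guard is a bounds check before the copy; in sketch form (hypothetical names):

#include <stddef.h>

/* Reject a command buffer too short to contain the fixed header plus
 * the PAS payload the RQ setup wants to copy out of it (the kernel
 * returns -EINVAL here).  Subtracting after the first test avoids any
 * wrap in the comparison itself. */
static int check_cmd_bounds(size_t buflen, size_t hdr, size_t payload)
{
	if (buflen < hdr || buflen - hdr < payload)
		return -1;
	return 0;
}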
drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_srq *srq;
-	int desc_size;
-	int buf_size;
+	size_t desc_size;
+	size_t buf_size;
 	int err;
 	struct mlx5_srq_attr in = {0};
 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+		return ERR_PTR(-EINVAL);
 	desc_size = roundup_pow_of_two(desc_size);
-	desc_size = max_t(int, 32, desc_size);
+	desc_size = max_t(size_t, 32, desc_size);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+		return ERR_PTR(-EINVAL);
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
-		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
-		    srq->msrq.max_avail_gather);
+	if (buf_size < desc_size)
+		return ERR_PTR(-EINVAL);
 
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
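The SRQ fix is integer-overflow hygiene: desc_size and buf_size move from int to size_t and the intermediate results are sanity-checked after the fact, since max_gs and max derive from user-influenced attributes. A distilled, exact version of such a guard (not the kernel code, which checks the computed results instead):

#include <stddef.h>
#include <stdint.h>

/* Compute hdr + max_gs * seg without wrapping: a division-based
 * pre-check is exact, unlike inspecting the result afterward. */
static int calc_buf_size(size_t hdr, size_t max_gs, size_t seg, size_t *out)
{
	if (seg != 0 && max_gs > (SIZE_MAX - hdr) / seg)
		return -1;             /* hdr + max_gs * seg would wrap */
	*out = hdr + max_gs * seg;
	return 0;
}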
include/linux/mlx5/driver.h
@@ -453,8 +453,8 @@ struct mlx5_core_srq {
 	struct mlx5_core_rsc_common	common; /* must be first */
 	u32		srqn;
 	int		max;
-	int		max_gs;
-	int		max_avail_gather;
+	size_t		max_gs;
+	size_t		max_avail_gather;
 	int		wqe_shift;
 	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);
include/rdma/ib_verbs.h
@@ -1537,10 +1537,6 @@ struct ib_xrcd {
 
 	struct mutex		tgt_qp_mutex;
 	struct list_head	tgt_qp_list;
-	/*
-	 * Implementation details of the RDMA core, don't use in drivers:
-	 */
-	struct rdma_restrack_entry res;
 };
 
 struct ib_ah {