Commit ae2911de authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Two more merge window regressions, a corruption bug in hfi1 and a few
  other small fixes.

   - Missing user input validation regression in ucma

   - Disallowing a previously allowed user combination regression in
     mlx5

   - ODP prefetch memory leaking triggerable by userspace

   - Memory corruption in hfi1 due to faulty ring buffer logic

   - Missed mutex initialization crash in mlx5

   - Two small defects with RDMA DIM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Free DIM memory in error unwind
  RDMA/core: Stop DIM before destroying CQ
  RDMA/mlx5: Initialize QP mutex for the debug kernels
  IB/rdmavt: Fix RQ counting issues causing use of an invalid RWQE
  RDMA/mlx5: Allow providing extra scatter CQE QP flag
  RDMA/mlx5: Fix prefetch memory leak if get_prefetchable_mr fails
  RDMA/cm: Add min length checks to user structure copies
parents 78431ab7 fb448ce8
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
 	INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 }
 
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+	if (!cq->dim)
+		return;
+
+	cancel_work_sync(&cq->dim->work);
+	kfree(cq->dim);
+}
+
 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 {
 	int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
 	return cq;
 
 out_destroy_cq:
+	rdma_dim_destroy(cq);
 	rdma_restrack_del(&cq->res);
 	cq->device->ops.destroy_cq(cq, udata);
 out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 		WARN_ON_ONCE(1);
 	}
 
+	rdma_dim_destroy(cq);
 	trace_cq_free(cq);
 	rdma_restrack_del(&cq->res);
 	cq->device->ops.destroy_cq(cq, udata);
-	if (cq->dim)
-		cancel_work_sync(&cq->dim->work);
-	kfree(cq->dim);
 	kfree(cq->wc);
 	kfree(cq);
 }
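
The ordering in both hunks is the point of the fix: the DIM work item dereferences the CQ, so it must be cancelled before the CQ is destroyed and freed, on the error-unwind path as well as in ib_free_cq_user(). A minimal sketch of the same teardown rule, under a hypothetical my_obj type (only the workqueue/slab calls are real kernel API):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_obj {
            struct work_struct work;        /* handler dereferences *obj */
            int state;
    };

    static void my_obj_destroy(struct my_obj *obj)
    {
            /* 1. Guarantee the handler is not running and cannot rearm. */
            cancel_work_sync(&obj->work);
            /* 2. Only now is it safe to free the memory the handler uses. */
            kfree(obj);
    }

Destroying in the opposite order leaves a window where the work handler runs against freed memory, which is the use-after-free class these two commits close.
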
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 	size_t in_size;
 	int ret;
 
+	if (in_len < offsetofend(typeof(cmd), reserved))
+		return -EINVAL;
 	in_size = min_t(size_t, in_len, sizeof(cmd));
 	if (copy_from_user(&cmd, inbuf, in_size))
 		return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 	size_t in_size;
 	int ret;
 
+	if (in_len < offsetofend(typeof(cmd), reserved))
+		return -EINVAL;
 	in_size = min_t(size_t, in_len, sizeof(cmd));
 	if (copy_from_user(&cmd, inbuf, in_size))
 		return -EFAULT;
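
Both handlers copy min(in_len, sizeof(cmd)) bytes into an on-stack cmd, so a short in_len used to leave the tail of cmd as uninitialized stack data that the handler then acted on; the new offsetofend() check enforces a floor at the last mandatory field. A hedged sketch of the pattern, with a hypothetical struct my_cmd standing in for the rdma_ucm command:

    #include <linux/kernel.h>
    #include <linux/stddef.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical command struct: fields up to 'reserved' are mandatory;
     * newer userspace may append optional fields after them. */
    struct my_cmd {
            __u64 response;
            __u32 id;
            __u32 reserved;
    };

    static ssize_t my_handler(const char __user *inbuf, int in_len)
    {
            struct my_cmd cmd;
            size_t in_size;

            /* Reject buffers too short to hold the mandatory fields;
             * otherwise the tail of 'cmd' would be uninitialized stack. */
            if (in_len < offsetofend(typeof(cmd), reserved))
                    return -EINVAL;

            /* Tolerate longer (newer) structs, but never copy past cmd. */
            in_size = min_t(size_t, in_len, sizeof(cmd));
            if (copy_from_user(&cmd, inbuf, in_size))
                    return -EFAULT;

            /* ... act on cmd ... */
            return in_size;
    }

Accepting in_len larger than sizeof(cmd) is deliberate: it lets older kernels tolerate newer, bigger userspace structs while still never copying past the kernel's own buffer.
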
@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
 		work->frags[i].mr =
 			get_prefetchable_mr(pd, advice, sg_list[i].lkey);
 		if (!work->frags[i].mr) {
-			work->num_sge = i - 1;
-			if (i)
-				destroy_prefetch_work(work);
+			work->num_sge = i;
 			return false;
 		}
@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 	srcu_key = srcu_read_lock(&dev->odp_srcu);
 	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
 		srcu_read_unlock(&dev->odp_srcu, srcu_key);
+		destroy_prefetch_work(work);
 		return -EINVAL;
 	}
 	queue_work(system_unbound_wq, &work->work);
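
The two hunks together move cleanup to a single owner: init_prefetch_work() now records exactly how many SGEs were populated (i, not i - 1) and returns, and the caller always calls destroy_prefetch_work() on failure. The old code undercounted by one, leaking an MR reference, and skipped cleanup entirely when the very first lookup failed, leaking the work allocation. A self-contained sketch of that contract, with hypothetical names:

    #include <stdbool.h>
    #include <stdlib.h>

    struct frag { void *res; };

    struct batch {
            struct frag frags[8];
            unsigned int num;       /* entries holding a live resource */
    };

    /* Partial-failure contract: record exactly how many slots were
     * populated and return false; the caller owns the single cleanup. */
    static bool batch_init(struct batch *b, unsigned int want)
    {
            unsigned int i;

            for (i = 0; i < want; i++) {
                    b->frags[i].res = malloc(16);   /* stand-in acquisition */
                    if (!b->frags[i].res) {
                            b->num = i;     /* slots 0..i-1 succeeded */
                            return false;
                    }
            }
            b->num = want;
            return true;
    }

    static void batch_destroy(struct batch *b)
    {
            unsigned int i;

            for (i = 0; i < b->num; i++)
                    free(b->frags[i].res);
    }

The caller then mirrors mlx5_ib_advise_mr_prefetch(): if (!batch_init(b, n)) { batch_destroy(b); return -EINVAL; } — one cleanup call, never zero, never two.
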
@@ -1766,15 +1766,14 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+					 struct mlx5_ib_qp *qp,
 					 struct ib_qp_init_attr *init_attr,
-					 struct mlx5_ib_create_qp *ucmd,
 					 void *qpc)
 {
 	int scqe_sz;
 	bool allow_scat_cqe = false;
 
-	if (ucmd)
-		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+	allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
 
 	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
 		return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	u32 *in;
 	int err;
 
-	mutex_init(&qp->mutex);
-
 	if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 *in;
 	int err;
 
-	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	}
 
 	if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
 	    (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
-		configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+		configure_requester_scat_cqe(dev, qp, init_attr, qpc);
 
 	if (qp->rq.wqe_cnt) {
 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 *in;
 	int err;
 
-	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
 		return;
 	}
 
-	if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+	switch (flag) {
+	case MLX5_QP_FLAG_SCATTER_CQE:
+	case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
 		/*
-		 * We don't return error if this flag was provided,
+		 * We don't return error if these flags were provided,
 		 * and mlx5 doesn't have right capability.
 		 */
-		*flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+		*flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+			    MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
 		return;
+	default:
+		break;
 	}
 
 	mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
 }
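
The switch widens an existing policy: SCATTER_CQE and now ALLOW_SCATTER_CQE are best-effort optimizations, silently masked off when the device lacks the capability instead of failing QP creation, while genuinely unknown flags fall through to the debug message and stay set for the caller to reject. A hedged userspace sketch of that shape, with hypothetical flag values:

    #include <stdio.h>

    #define FLAG_SCATTER_CQE        (1u << 0)   /* hypothetical values */
    #define FLAG_ALLOW_SCATTER_CQE  (1u << 1)

    static void process_flag(unsigned int *flags, unsigned int flag,
                             int cap_supported)
    {
            if (!(*flags & flag))
                    return;
            if (cap_supported) {
                    *flags &= ~flag;        /* consumed: will be honored */
                    return;
            }
            switch (flag) {
            case FLAG_SCATTER_CQE:
            case FLAG_ALLOW_SCATTER_CQE:
                    /* Best-effort optimization: drop silently, don't fail. */
                    *flags &= ~(FLAG_SCATTER_CQE | FLAG_ALLOW_SCATTER_CQE);
                    return;
            default:
                    break;  /* stays set; caller rejects leftovers */
            }
            fprintf(stderr, "flag 0x%X is not supported\n", flag);
    }
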
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
 			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
 
 	if (qp->type == IB_QPT_RAW_PACKET) {
 		cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
 		goto free_ucmd;
 	}
 
+	mutex_init(&qp->mutex);
 	qp->type = type;
 	if (udata) {
 		err = process_vendor_flags(dev, qp, params.ucmd, attr);
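
This is the other half of the mutex fix: the per-flow mutex_init() calls removed above are replaced by one call in mlx5_ib_create_qp(), the path every QP type funnels through, so no creation flow can reach mutex_lock() on an uninitialized mutex — which debug kernels turn into a crash. A minimal sketch of the init-once-in-the-common-constructor rule, with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct my_qp {
            struct mutex lock;      /* guards modify-QP state */
            int state;
    };

    /* Hypothetical common constructor: every creation flow passes
     * through here, so no flow can miss the lock initialization. */
    static struct my_qp *my_qp_alloc(void)
    {
            struct my_qp *qp = kzalloc(sizeof(*qp), GFP_KERNEL);

            if (!qp)
                    return NULL;
            mutex_init(&qp->lock);  /* before any mutex_lock() is possible */
            return qp;
    }
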
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
 	qp->s_tail_ack_queue = 0;
 	qp->s_acked_ack_queue = 0;
 	qp->s_num_rd_atomic = 0;
-	if (qp->r_rq.kwq)
-		qp->r_rq.kwq->count = qp->r_rq.size;
 	qp->r_sge.num_sge = 0;
 	atomic_set(&qp->s_reserved_used, 0);
 }
@@ -2366,31 +2364,6 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
 	return 0;
 }
 
-/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
-	u32 count;
-
-	count = head;
-
-	if (count >= rq->size)
-		count = 0;
-
-	if (count < tail)
-		count += rq->size - tail;
-	else
-		count -= tail;
-
-	return count;
-}
-
 /**
  * get_rvt_head - get head indices of the circular buffer
  * @rq: data structure for request queue entry
@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 		if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
 			head = get_rvt_head(rq, ip);
-			kwq->count = get_count(rq, tail, head);
+			kwq->count = rvt_get_rq_count(rq, head, tail);
 		}
 		if (unlikely(kwq->count == 0)) {
 			ret = 0;
@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 		 * the number of remaining WQEs.
 		 */
 		if (kwq->count < srq->limit) {
-			kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+			kwq->count =
+				rvt_get_rq_count(rq,
+						 get_rvt_head(rq, ip), tail);
 			if (kwq->count < srq->limit) {
 				struct ib_event ev;
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
 		 * not atomic, which is OK, since the fuzziness is
 		 * resolved as further ACKs go out.
 		 */
-		credits = head - tail;
-		if ((int)credits < 0)
-			credits += qp->r_rq.size;
+		credits = rvt_get_rq_count(&qp->r_rq, head, tail);
 	}
 	/*
 	 * Binary search the credit table to find the code to
@@ -305,6 +305,25 @@ struct rvt_rq {
 	spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
+/**
+ * rvt_get_rq_count - count numbers of request work queue entries
+ * in circular buffer
+ * @rq: data structure for request queue entry
+ * @head: head indices of the circular buffer
+ * @tail: tail indices of the circular buffer
+ *
+ * Return - total number of entries in the Receive Queue
+ */
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+	u32 count = head - tail;
+
+	if ((s32)count < 0)
+		count += rq->size;
+	return count;
+}
+
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.
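
The new helper leans on unsigned wraparound: when head has wrapped past tail, head - tail underflows, the (s32) cast sees a negative value, and adding rq->size recovers the true occupancy, without the removed get_count()'s fragile special-casing of head. A small standalone check of the arithmetic (userspace reimplementation, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* Standalone copy of the count logic for a ring of `size` slots,
     * using the same unsigned-underflow trick as rvt_get_rq_count(). */
    static uint32_t rq_count(uint32_t size, uint32_t head, uint32_t tail)
    {
            uint32_t count = head - tail;

            if ((int32_t)count < 0)
                    count += size;  /* head wrapped around past tail */
            return count;
    }

    int main(void)
    {
            assert(rq_count(8, 5, 2) == 3); /* no wrap: slots 2,3,4 used */
            assert(rq_count(8, 2, 6) == 4); /* wrap: slots 6,7,0,1 used */
            assert(rq_count(8, 4, 4) == 0); /* empty ring */
            return 0;
    }

Both rvt_get_rwqe() and rvt_compute_aeth() now share this one definition, which is what closes the miscounting that let the hfi1 ring buffer hand out an invalid RWQE.
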