Commit 2f64e70c authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:

 - Various driver bug fixes in mlx5, mlx4, bnxt_re and qedr, ranging
   from bugs under load to bad error case handling

 - There is one largish patch fixing the locking in bnxt_re to avoid a
   machine hard lock situation

 - A few core bugs on error paths

 - A patch to reduce stack usage in the new CQ API

 - One mlx5 regression introduced in this merge window

 - There were new syzkaller scripts written for the RDMA subsystem and
   we are fixing issues found by the bot

 - One of the commits (aa0de36a “RDMA/mlx5: Fix integer overflow
   while resizing CQ”) is missing part of the commit log message and one
   of the SOB lines. The original patch was from Leon Romanovsky, and a
   cut-n-paste separator in the commit message confused patchworks which
   then put the end of message separator in the wrong place in the
   downloaded patch, and I didn’t notice in time. The patch made it into
   the official branch, and the only way to fix it in-place was to
   rebase. Given the pain that a rebase causes, and the fact that the
   patch has relevant tags for stable and syzkaller, a revert of the
   munged patch and a reapplication of the original patch with the log
   message intact was done.

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (25 commits)
  RDMA/mlx5: Fix integer overflow while resizing CQ
  Revert "RDMA/mlx5: Fix integer overflow while resizing CQ"
  RDMA/ucma: Check that user doesn't overflow QP state
  RDMA/mlx5: Fix integer overflow while resizing CQ
  RDMA/ucma: Limit possible option size
  IB/core: Fix possible crash to access NULL netdev
  RDMA/bnxt_re: Avoid Hard lockup during error CQE processing
  RDMA/core: Reduce poll batch for direct cq polling
  IB/mlx5: Fix an error code in __mlx5_ib_modify_qp()
  IB/mlx5: When not in dual port RoCE mode, use provided port as native
  IB/mlx4: Include GID type when deleting GIDs from HW table under RoCE
  IB/mlx4: Fix corruption of RoCEv2 IPv4 GIDs
  RDMA/qedr: Fix iWARP write and send with immediate
  RDMA/qedr: Fix kernel panic when running fio over NFSoRDMA
  RDMA/qedr: Fix iWARP connect with port mapper
  RDMA/qedr: Fix ipv6 destination address resolution
  IB/core : Add null pointer check in addr_resolve
  RDMA/bnxt_re: Fix the ib_reg failure cleanup
  RDMA/bnxt_re: Fix incorrect DB offset calculation
  RDMA/bnxt_re: Unconditionly fence non wire memory operations
  ...
parents b3337a6c 28e9091e
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
                        dst_release(dst);
        }
 
-       if (ndev->flags & IFF_LOOPBACK) {
-               ret = rdma_translate_ip(dst_in, addr);
-               /*
-                * Put the loopback device and get the translated
-                * device instead.
-                */
-               dev_put(ndev);
-               ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-       } else {
-               addr->bound_dev_if = ndev->ifindex;
+       if (ndev) {
+               if (ndev->flags & IFF_LOOPBACK)
+                       ret = rdma_translate_ip(dst_in, addr);
+               else
+                       addr->bound_dev_if = ndev->ifindex;
+               dev_put(ndev);
        }
-       dev_put(ndev);
 
        return ret;
 }
...
@@ -17,6 +17,7 @@
 
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH                  16
+#define IB_POLL_BATCH_DIRECT           8
 
 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ             256
@@ -25,18 +26,18 @@
 #define IB_POLL_FLAGS \
        (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+                          int batch)
 {
        int i, n, completed = 0;
-       struct ib_wc *wcs = poll_wc ? : cq->wc;
 
        /*
         * budget might be (-1) if the caller does not
         * want to bound this call, thus we need unsigned
         * minimum here.
         */
-       while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
-                       budget - completed), wcs)) > 0) {
+       while ((n = ib_poll_cq(cq, min_t(u32, batch,
+                       budget - completed), wcs)) > 0) {
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = &wcs[i];
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
 
                completed += n;
 
-               if (n != IB_POLL_BATCH ||
-                   (budget != -1 && completed >= budget))
+               if (n != batch || (budget != -1 && completed >= budget))
                        break;
        }
 
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-       struct ib_wc wcs[IB_POLL_BATCH];
+       struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 
-       return __ib_process_cq(cq, budget, wcs);
+       return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
        struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
        int completed;
 
-       completed = __ib_process_cq(cq, budget, NULL);
+       completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
        if (completed < budget) {
                irq_poll_complete(&cq->iop);
                if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
        struct ib_cq *cq = container_of(work, struct ib_cq, work);
        int completed;
 
-       completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+       completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+                                   IB_POLL_BATCH);
        if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
            ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                queue_work(ib_comp_wq, &cq->work);
...
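The cq.c change above replaces the NULL-sentinel convention (where __ib_process_cq fell back to cq->wc when no buffer was passed) with an explicit buffer-plus-batch pair, so the direct-polling path can use a smaller on-stack array. A minimal userspace sketch of that pattern, assuming a non-negative budget, with poll_hw() as a hypothetical stand-in for ib_poll_cq():

#include <stddef.h>

#define POLL_BATCH        16
#define POLL_BATCH_DIRECT 8

struct wc { unsigned long wr_id; int status; };

/* Hypothetical stand-in for ib_poll_cq(); returns 0 here (empty CQ). */
static int poll_hw(struct wc *wcs, int nwc) { (void)wcs; (void)nwc; return 0; }

/* Callers pass their own buffer and the batch size that matches it, so
 * the callee can never write past what the caller allocated. */
static int process_cq(int budget, struct wc *wcs, int batch)
{
        int n, completed = 0;

        while ((n = poll_hw(wcs, batch < budget - completed ?
                                 batch : budget - completed)) > 0) {
                completed += n;
                if (n != batch || completed >= budget)
                        break;
        }
        return completed;
}

int process_cq_direct(int budget)
{
        /* 8 entries instead of 16 roughly halves the stack footprint */
        struct wc wcs[POLL_BATCH_DIRECT];

        return process_cq(budget, wcs, POLL_BATCH_DIRECT);
}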
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
-               goto cache_cleanup;
+               goto cg_cleanup;
        }
 
        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
-               goto cache_cleanup;
+               goto cg_cleanup;
        }
 
        device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
        mutex_unlock(&device_mutex);
        return 0;
 
+cg_cleanup:
+       ib_device_unregister_rdmacg(device);
 cache_cleanup:
        ib_cache_cleanup_one(device);
        ib_cache_release_one(device);
...
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
 
                resolved_dev = dev_get_by_index(dev_addr.net,
                                                dev_addr.bound_dev_if);
-               if (resolved_dev->flags & IFF_LOOPBACK) {
-                       dev_put(resolved_dev);
-                       resolved_dev = idev;
-                       dev_hold(resolved_dev);
+               if (!resolved_dev) {
+                       dev_put(idev);
+                       return -ENODEV;
                }
                ndev = ib_get_ndev_from_path(rec);
                rcu_read_lock();
...
@@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
+       if (cmd.qp_state > IB_QPS_ERR)
+               return -EINVAL;
+
        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
@@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
+       if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+               return -EINVAL;
+
        optval = memdup_user((void __user *) (unsigned long) cmd.optval,
                             cmd.optlen);
        if (IS_ERR(optval)) {
...
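Both ucma checks above follow the same rule: bound a user-controlled value before it is used as an enum index or an allocation size. A minimal userspace sketch of that rule, with MAX_OPT_LEN as a hypothetical stand-in for KMALLOC_MAX_SIZE and malloc/memcpy standing in for memdup_user():

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define QP_STATE_MAX 6          /* like IB_QPS_ERR, the last valid state */
#define MAX_OPT_LEN  (4u << 20) /* stand-in for KMALLOC_MAX_SIZE */

int set_option(const void *buf, uint32_t optlen, uint32_t qp_state)
{
        void *opt;

        if (qp_state > QP_STATE_MAX)    /* reject out-of-range enum values */
                return -EINVAL;
        if (optlen > MAX_OPT_LEN)       /* reject before allocating */
                return -EINVAL;

        opt = malloc(optlen);
        if (!opt)
                return -ENOMEM;
        memcpy(opt, buf, optlen);
        /* ... act on opt ... */
        free(opt);
        return 0;
}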
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
        return 0;
 }
 
-static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
 {
        unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
        return flags;
 }
 
-static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
-                              unsigned long flags)
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+                       unsigned long flags)
        __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
 {
        if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
        int status;
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;
+       unsigned int flags;
        u8 nw_type;
 
        qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                                dev_dbg(rdev_to_dev(rdev),
                                        "Move QP = %p to flush list\n",
                                        qp);
+                               flags = bnxt_re_lock_cqs(qp);
                                bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+                               bnxt_re_unlock_cqs(qp, flags);
                        }
                        if (!qp->sumem &&
                            qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
                                dev_dbg(rdev_to_dev(rdev),
                                        "Move QP = %p out of flush list\n",
                                        qp);
+                               flags = bnxt_re_lock_cqs(qp);
                                bnxt_qplib_clean_qp(&qp->qplib_qp);
+                               bnxt_re_unlock_cqs(qp, flags);
                        }
                }
                if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
        wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
        wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
 
+       /* Need unconditional fence for local invalidate
+        * opcode to work as expected.
+        */
+       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
        if (wr->send_flags & IB_SEND_SIGNALED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
-       if (wr->send_flags & IB_SEND_FENCE)
-               wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
        if (wr->send_flags & IB_SEND_SOLICITED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
        wqe->frmr.levels = qplib_frpl->hwq.level + 1;
        wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
 
-       if (wr->wr.send_flags & IB_SEND_FENCE)
-               wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+       /* Need unconditional fence for reg_mr
+        * opcode to function as expected.
+        */
+       wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
        if (wr->wr.send_flags & IB_SEND_SIGNALED)
                wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
...
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
                                           struct ib_udata *udata);
 int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
 #endif /* __BNXT_RE_IB_VERBS_H__ */
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
                                         struct bnxt_re_qp *qp)
 {
        struct ib_event event;
+       unsigned int flags;
+
+       if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+               flags = bnxt_re_lock_cqs(qp);
+               bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+               bnxt_re_unlock_cqs(qp, flags);
+       }
 
        memset(&event, 0, sizeof(event));
        if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
        switch (re_work->event) {
        case NETDEV_REGISTER:
                rc = bnxt_re_ib_reg(rdev);
-               if (rc)
+               if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to register with IB: %#x", rc);
+                       bnxt_re_remove_one(rdev);
+                       bnxt_re_dev_unreg(rdev);
+               }
                break;
        case NETDEV_UP:
                bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
...
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
        }
 }
 
-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
-                                unsigned long *flags)
-       __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+                                             unsigned long *flags)
+       __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
 {
-       spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+       spin_lock_irqsave(&qp->scq->flush_lock, *flags);
        if (qp->scq == qp->rcq)
-               __acquire(&qp->rcq->hwq.lock);
+               __acquire(&qp->rcq->flush_lock);
        else
-               spin_lock(&qp->rcq->hwq.lock);
+               spin_lock(&qp->rcq->flush_lock);
 }
 
-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
-                                unsigned long *flags)
-       __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+                                             unsigned long *flags)
+       __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 {
        if (qp->scq == qp->rcq)
-               __release(&qp->rcq->hwq.lock);
+               __release(&qp->rcq->flush_lock);
        else
-               spin_unlock(&qp->rcq->hwq.lock);
-       spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
+               spin_unlock(&qp->rcq->flush_lock);
+       spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 }
 
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
-                                                     struct bnxt_qplib_cq *cq)
-{
-       struct bnxt_qplib_cq *buddy_cq = NULL;
-
-       if (qp->scq == qp->rcq)
-               buddy_cq = NULL;
-       else if (qp->scq == cq)
-               buddy_cq = qp->rcq;
-       else
-               buddy_cq = qp->scq;
-       return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
-                                    struct bnxt_qplib_cq *cq)
-       __acquires(&buddy_cq->hwq.lock)
-{
-       struct bnxt_qplib_cq *buddy_cq = NULL;
-
-       buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-       if (!buddy_cq)
-               __acquire(&cq->hwq.lock);
-       else
-               spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
-                                      struct bnxt_qplib_cq *cq)
-       __releases(&buddy_cq->hwq.lock)
-{
-       struct bnxt_qplib_cq *buddy_cq = NULL;
-
-       buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-       if (!buddy_cq)
-               __release(&cq->hwq.lock);
-       else
-               spin_unlock(&buddy_cq->hwq.lock);
-}
-
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 {
        unsigned long flags;
 
-       bnxt_qplib_acquire_cq_locks(qp, &flags);
+       bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __bnxt_qplib_add_flush_qp(qp);
-       bnxt_qplib_release_cq_locks(qp, &flags);
+       bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
        unsigned long flags;
 
-       bnxt_qplib_acquire_cq_locks(qp, &flags);
+       bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __clean_cq(qp->scq, (u64)(unsigned long)qp);
        qp->sq.hwq.prod = 0;
        qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
        qp->rq.hwq.cons = 0;
 
        __bnxt_qplib_del_flush_qp(qp);
-       bnxt_qplib_release_cq_locks(qp, &flags);
+       bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
        /* Must block new posting of SQ and RQ */
        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
        bnxt_qplib_cancel_phantom_processing(qp);
-
-       /* Add qp to flush list of the CQ */
-       __bnxt_qplib_add_flush_qp(qp);
 }
 
 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                                 sw_sq_cons, cqe->wr_id, cqe->status);
                        cqe++;
                        (*budget)--;
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
                        bnxt_qplib_mark_qp_error(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       /* Add qp to flush list of the CQ */
+                       bnxt_qplib_add_flush_qp(qp);
                } else {
                        if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
                                /* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
                if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
                        /* Add qp to flush list of the CQ */
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
-                       __bnxt_qplib_add_flush_qp(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       bnxt_qplib_add_flush_qp(qp);
                }
        }
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
                if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
                        /* Add qp to flush list of the CQ */
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
-                       __bnxt_qplib_add_flush_qp(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       bnxt_qplib_add_flush_qp(qp);
                }
        }
 done:
@@ -2501,11 +2454,9 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 {
        struct cq_base *hw_cqe, **hw_cqe_ptr;
-       unsigned long flags;
        u32 sw_cons, raw_cons;
        bool rc = true;
 
-       spin_lock_irqsave(&cq->hwq.lock, flags);
        raw_cons = cq->hwq.cons;
        sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
        hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 
        /* Check for Valid bit. If the CQE is valid, return false */
        rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
        return rc;
 }
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
                if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
                        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
                        /* Add qp to flush list of the CQ */
-                       bnxt_qplib_lock_buddy_cq(qp, cq);
-                       __bnxt_qplib_add_flush_qp(qp);
-                       bnxt_qplib_unlock_buddy_cq(qp, cq);
+                       bnxt_qplib_add_flush_qp(qp);
                }
        }
@@ -2719,9 +2667,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
         */
        /* Add qp to flush list of the CQ */
-       bnxt_qplib_lock_buddy_cq(qp, cq);
-       __bnxt_qplib_add_flush_qp(qp);
-       bnxt_qplib_unlock_buddy_cq(qp, cq);
+       bnxt_qplib_add_flush_qp(qp);
 done:
        return rc;
 }
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
        u32 budget = num_cqes;
        unsigned long flags;
 
-       spin_lock_irqsave(&cq->hwq.lock, flags);
+       spin_lock_irqsave(&cq->flush_lock, flags);
        list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
                dev_dbg(&cq->hwq.pdev->dev,
                        "QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
                        qp);
                __flush_rq(&qp->rq, qp, &cqe, &budget);
        }
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
+       spin_unlock_irqrestore(&cq->flush_lock, flags);
 
        return num_cqes - budget;
 }
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                       int num_cqes, struct bnxt_qplib_qp **lib_qp)
 {
        struct cq_base *hw_cqe, **hw_cqe_ptr;
-       unsigned long flags;
        u32 sw_cons, raw_cons;
        int budget, rc = 0;
 
-       spin_lock_irqsave(&cq->hwq.lock, flags);
        raw_cons = cq->hwq.cons;
        budget = num_cqes;
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
        }
 exit:
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
        return num_cqes - budget;
 }
 
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->hwq.lock, flags);
        if (arm_type)
                bnxt_qplib_arm_cq(cq, arm_type);
        /* Using cq->arm_state variable to track whether to issue cq handler */
        atomic_set(&cq->arm_state, 1);
-       spin_unlock_irqrestore(&cq->hwq.lock, flags);
 }
 
 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
...
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
        struct list_head                sqf_head, rqf_head;
        atomic_t                        arm_state;
        spinlock_t                      compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+       spinlock_t                      flush_lock; /* QP flush management */
 };
 
 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE        sizeof(struct xrrq_irrq)
...
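A compact, kernel-style sketch of the dual-lock pattern these locking notes describe, mirroring bnxt_qplib_acquire_cq_flush_locks() but simplified here without the sparse __acquire()/__release() annotations; demo_cq is a stand-in type, not the driver's struct:

#include <linux/spinlock.h>

struct demo_cq {
        spinlock_t flush_lock;
};

/* Take the flush locks of both CQs of a QP; when the QP uses a single
 * CQ for both send and receive, take that one lock only once. */
static void demo_lock_flush(struct demo_cq *scq, struct demo_cq *rcq,
                            unsigned long *flags)
{
        spin_lock_irqsave(&scq->flush_lock, *flags);
        if (scq != rcq)
                spin_lock(&rcq->flush_lock);    /* nested acquisition */
}

static void demo_unlock_flush(struct demo_cq *scq, struct demo_cq *rcq,
                              unsigned long *flags)
{
        if (scq != rcq)
                spin_unlock(&rcq->flush_lock);
        spin_unlock_irqrestore(&scq->flush_lock, *flags);
}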
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
                           err_event->res_err_state_reason);
                if (!qp)
                        break;
-               bnxt_qplib_acquire_cq_locks(qp, &flags);
                bnxt_qplib_mark_qp_error(qp);
-               bnxt_qplib_release_cq_locks(qp, &flags);
+               rcfw->aeq_handler(rcfw, qp_event, qp);
                break;
        default:
                /* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
        int rc;
 
        RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+       /* Supply (log-base-2-of-host-page-size - base-page-shift)
+        * to bono to adjust the doorbell page sizes.
+        */
+       req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+                                          RCFW_DBR_BASE_PAGE_SHIFT);
        /*
         * VFs need not setup the HW context area, PF
         * shall setup this area for VF. Skipping the
...
@@ -49,6 +49,7 @@
 #define RCFW_COMM_SIZE                 0x104
 
 #define RCFW_DBR_PCI_BAR_REGION                2
+#define RCFW_DBR_BASE_PAGE_SHIFT       12
 
 #define RCFW_CMD_PREP(req, CMD, cmd_flags)     \
        do {                                    \
...
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
        attr->max_pkey = le32_to_cpu(sb->max_pkeys);
 
        attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-       attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+       attr->l2_db_size = (sb->l2_db_space_size + 1) *
+                           (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
        attr->max_sgid = le32_to_cpu(sb->max_gid);
 
        bnxt_qplib_query_version(rcfw, attr->fw_ver);
...
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
        #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M    (0x3UL << 4)
        #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M    (0x4UL << 4)
        #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G    (0x5UL << 4)
-       __le16 reserved16;
+       /* This value is (log-base-2-of-DBR-page-size - 12).
+        * 0 for 4KB. HW supported values are enumerated below.
+        */
+       __le16  log2_dbr_pg_size;
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK        0xfUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT         0
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K       0x0UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K       0x1UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K      0x2UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K      0x3UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K      0x4UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K     0x5UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K     0x6UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K     0x7UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M       0x8UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M       0x9UL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M       0xaUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M       0xbUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M      0xcUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M      0xdUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M      0xeUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M     0xfUL
+       #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST        \
+                       CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
        __le64 qpc_page_dir;
        __le64 mrw_page_dir;
        __le64 srq_page_dir;
...
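Taken together, the four bnxt_re doorbell hunks above make the driver report its host page size to firmware as a log2 offset from the fixed 4K base, instead of silently assuming 4K pages when sizing the doorbell region. A small standalone sketch of the encoding:

#include <stdio.h>

#define RCFW_DBR_BASE_PAGE_SHIFT 12     /* 4K base, as in qplib_rcfw.h */

int main(void)
{
        /* e.g. a 4K-page x86 host programs 0 (PG_4K); a 64K-page
         * arm64 host programs 4 (PG_64K) */
        for (unsigned int page_shift = 12; page_shift <= 16; page_shift++)
                printf("PAGE_SHIFT %u -> log2_dbr_pg_size %u\n",
                       page_shift, page_shift - RCFW_DBR_BASE_PAGE_SHIFT);
        return 0;
}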
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
        wc->dlid_path_bits = 0;
 
        if (is_eth) {
+               wc->slid = 0;
                wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
                memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
                memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                }
        }
 
-       wc->slid = be16_to_cpu(cqe->rlid);
        g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
        wc->src_qp = g_mlpath_rqpn & 0xffffff;
        wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
                                        cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
        if (is_eth) {
+               wc->slid = 0;
                wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
                if (be32_to_cpu(cqe->vlan_my_qpn) &
                    MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                        memcpy(wc->smac, cqe->smac, ETH_ALEN);
                        wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                } else {
+                       wc->slid = be16_to_cpu(cqe->rlid);
                        wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
                        wc->vlan_id = 0xffff;
                }
...
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
                        gid_tbl[i].version = 2;
                        if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
                                gid_tbl[i].type = 1;
-                       else
-                               memset(&gid_tbl[i].gid, 0, 12);
                }
        }
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
                        if (!gids) {
                                ret = -ENOMEM;
                        } else {
-                               for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-                                       memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+                               for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+                                       memcpy(&gids[i].gid,
+                                              &port_gid_table->gids[i].gid,
+                                              sizeof(union ib_gid));
+                                       gids[i].gid_type =
+                                           port_gid_table->gids[i].gid_type;
+                               }
                        }
                }
                spin_unlock_bh(&iboe->lock);
...
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
                break;
        }
-       wc->slid = be16_to_cpu(cqe->slid);
        wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
        wc->dlid_path_bits = cqe->ml_path;
        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
        }
 
        if (ll != IB_LINK_LAYER_ETHERNET) {
+               wc->slid = be16_to_cpu(cqe->slid);
                wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
                return;
        }
 
+       wc->slid = 0;
        vlan_present = cqe->l4_l3_hdr_type & 0x1;
        roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
        if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;
 
-       umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+       /* check multiplication overflow */
+       if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+               return -EINVAL;
+
+       umem = ib_umem_get(context, ucmd.buf_addr,
+                          (size_t)ucmd.cqe_size * entries,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
...
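The resize_user() guard above is the standard division-based overflow check: cqe_size * entries wraps exactly when entries > SIZE_MAX / cqe_size, which in integer arithmetic (with entries >= 1, as guaranteed earlier in that function) is the same test as SIZE_MAX / cqe_size <= entries - 1. A standalone sketch that also guards the entries == 0 case the kernel code did not need:

#include <stddef.h>
#include <stdint.h>

/* Return 1 if cqe_size * entries fits in size_t, 0 if it would wrap. */
static int size_mul_fits(size_t cqe_size, size_t entries)
{
        if (cqe_size && entries && SIZE_MAX / cqe_size <= entries - 1)
                return 0;       /* (size_t)cqe_size * entries would wrap */
        return 1;
}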
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;
 
+       if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+           ll != IB_LINK_LAYER_ETHERNET) {
+               if (native_port_num)
+                       *native_port_num = ib_port_num;
+               return ibdev->mdev;
+       }
+
        if (native_port_num)
                *native_port_num = 1;
 
-       if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-               return ibdev->mdev;
-
        port = &ibdev->port[ib_port_num - 1];
        if (!port)
                return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
        struct mlx5_ib_dev *ibdev;
        struct ib_event ibev;
        bool fatal = false;
-       u8 port = 0;
+       u8 port = (u8)work->param;
 
        if (mlx5_core_is_mp_slave(work->dev)) {
                ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
        case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               port = (u8)work->param;
-
                /* In RoCE, port up/down events are handled in
                 * mlx5_netdev_event().
                 */
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
-               port = (u8)work->param;
                break;
 
        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
-               port = (u8)work->param;
                schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
                break;
 
        case MLX5_DEV_EVENT_GUID_CHANGE:
                ibev.event = IB_EVENT_GID_CHANGE;
-               port = (u8)work->param;
                break;
 
        case MLX5_DEV_EVENT_CLIENT_REREG:
                ibev.event = IB_EVENT_CLIENT_REREGISTER;
-               port = (u8)work->param;
                break;
        case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
                schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
        ibev.device = &ibdev->ib_dev;
        ibev.element.port_num = port;
 
-       if (port < 1 || port > ibdev->num_ports) {
+       if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
                mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
                goto out;
        }
...
@@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
        mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
        mr->ibmr.length = 0;
-       mr->ndescs = sg_nents;
 
        for_each_sg(sgl, sg, sg_nents, i) {
                if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
                sg_offset = 0;
        }
+       mr->ndescs = i;
 
        if (sg_offset_p)
                *sg_offset_p = sg_offset;
...
@@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        u32 uidx = MLX5_IB_DEFAULT_UIDX;
        struct mlx5_ib_create_qp ucmd;
        struct mlx5_ib_qp_base *base;
+       int mlx5_st;
        void *qpc;
        u32 *in;
        int err;
@@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
 
+       mlx5_st = to_mlx5_st(init_attr->qp_type);
+       if (mlx5_st < 0)
+               return -EINVAL;
+
        if (init_attr->rwq_ind_tbl) {
                if (!udata)
                        return -ENOSYS;
@@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-       MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+       MLX5_SET(qpc, qpc, st, mlx5_st);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
        if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                goto out;
 
        if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-           !optab[mlx5_cur][mlx5_new])
+           !optab[mlx5_cur][mlx5_new]) {
+               err = -EINVAL;
                goto out;
+       }
 
        op = optab[mlx5_cur][mlx5_new];
        optpar = ib_mask_to_mlx5_opt(attr_mask);
...
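The __mlx5_ib_modify_qp() hunk above fixes a common error-path bug: a bare "goto out" inherits whatever value err happened to hold (often 0), so an invalid state transition was silently reported as success. A minimal sketch of the corrected shape, using a made-up transition table:

#include <errno.h>

#define NUM_STATE 4

/* transition table; 0 marks an unsupported state change */
static const int optab[NUM_STATE][NUM_STATE] = { { 1 } };

int modify(int cur, int new_state)
{
        int err = 0;

        if (cur >= NUM_STATE || new_state >= NUM_STATE ||
            !optab[cur][new_state]) {
                err = -EINVAL;  /* without this line, we would return 0 */
                goto out;
        }
        /* ... perform the transition optab[cur][new_state] ... */
out:
        return err;
}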
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
                }
                return -EINVAL;
        }
-       neigh = dst_neigh_lookup(dst, &dst_in);
-
+       neigh = dst_neigh_lookup(dst, &fl6.daddr);
        if (neigh) {
                rcu_read_lock();
                if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        qp = idr_find(&dev->qpidr, conn_param->qpn);
 
-       laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-       laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-       raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+       raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+       DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+                ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+                ntohs(raddr->sin_port));
 
        DP_DEBUG(dev, QEDR_MSG_IWARP,
                 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        int rc;
        int i;
 
-       laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
 
        DP_DEBUG(dev, QEDR_MSG_IWARP,
                 "Create Listener address: %pISpc\n", &cm_id->local_addr);
...
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
+               if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+                       rc = -EINVAL;
+                       *bad_wr = wr;
+                       break;
+               }
                wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
                swqe = (struct rdma_sq_send_wqe_1st *)wqe;
                swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                break;
        case IB_WR_RDMA_WRITE_WITH_IMM:
+               if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+                       rc = -EINVAL;
+                       *bad_wr = wr;
+                       break;
+               }
                wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
                rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qedr_cq *cq = get_qedr_cq(ibcq);
-       union rdma_cqe *cqe = cq->latest_cqe;
+       union rdma_cqe *cqe;
        u32 old_cons, new_cons;
        unsigned long flags;
        int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
        spin_lock_irqsave(&cq->cq_lock, flags);
+       cqe = cq->latest_cqe;
        old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
        while (num_entries && is_valid_cqe(cq, cqe)) {
                struct qedr_qp *qp;
...
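The two __qedr_post_send() hunks above gate send- and RDMA-write-with-immediate on the transport, since iWARP has no immediate data; rejecting the WR and setting *bad_wr lets the caller see exactly which request in the chain failed. A rough sketch of that verbs convention, with is_iwarp standing in for rdma_protocol_iwarp(&dev->ibdev, 1):

#include <errno.h>
#include <stddef.h>

struct send_wr { int opcode; struct send_wr *next; };
enum { WR_SEND = 0, WR_SEND_WITH_IMM = 1 };

int post_send(int is_iwarp, struct send_wr *wr, struct send_wr **bad_wr)
{
        for (; wr; wr = wr->next) {
                if (is_iwarp && wr->opcode == WR_SEND_WITH_IMM) {
                        *bad_wr = wr;   /* report the offending WR */
                        return -EINVAL;
                }
                /* ... build and ring the real WQE ... */
        }
        return 0;
}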
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
                trigger_cmd_completions(dev);
        }
 
-       mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+       mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
        mlx5_core_err(dev, "end\n");
 
 unlock:
...