Commit cc031556 authored by Mustafa Ismail, committed by Jason Gunthorpe

RDMA/irdma: Fix sleep from invalid context BUG

Taking the qos_mutex to process RoCEv2 QPs on netdev events causes a
kernel splat.

Fix this by removing the handling for RoCEv2 in
irdma_cm_teardown_connections that uses the mutex. This handling is only
needed for iWARP to avoid having connections established while the link is
down or having connections remain functional after the IP address is
removed.

  BUG: sleeping function called from invalid context at kernel/locking/mutex.
  Call Trace:
  kernel: dump_stack+0x66/0x90
  kernel: ___might_sleep.cold.92+0x8d/0x9a
  kernel: mutex_lock+0x1c/0x40
  kernel: irdma_cm_teardown_connections+0x28e/0x4d0 [irdma]
  kernel: ? check_preempt_curr+0x7a/0x90
  kernel: ? select_idle_sibling+0x22/0x3c0
  kernel: ? select_task_rq_fair+0x94c/0xc90
  kernel: ? irdma_exec_cqp_cmd+0xc27/0x17c0 [irdma]
  kernel: ? __wake_up_common+0x7a/0x190
  kernel: irdma_if_notify+0x3cc/0x450 [irdma]
  kernel: ? sched_clock_cpu+0xc/0xb0
  kernel: irdma_inet6addr_event+0xc6/0x150 [irdma]

Fixes: 146b9756 ("RDMA/irdma: Add connection manager")
Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
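The splat comes from taking a sleeping lock on a notifier path that may run in atomic context: the inet6addr chain is an atomic notifier chain, so callbacks on it run under rcu_read_lock() and must not call mutex_lock(). The fragment below is a minimal, self-contained sketch of that pattern, not the irdma code; every demo_* identifier is hypothetical and exists only for illustration.

/* Hedged sketch of the offending pattern: a notifier callback that sleeps. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <net/addrconf.h>

/* Stand-in for the per-priority qos_mutex taken by the driver. */
static DEFINE_MUTEX(demo_qos_mutex);

static int demo_inet6addr_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        /*
         * The inet6addr chain is atomic, so this callback runs under
         * rcu_read_lock(). mutex_lock() may sleep here, which sleep-in-atomic
         * debugging reports as "BUG: sleeping function called from invalid
         * context", as in the trace above.
         */
        mutex_lock(&demo_qos_mutex);
        /* ... walk QP lists, tear down connections ... */
        mutex_unlock(&demo_qos_mutex);

        return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_inet6addr_event,
};

static int __init demo_init(void)
{
        register_inet6addr_notifier(&demo_nb);
        return 0;
}

static void __exit demo_exit(void)
{
        unregister_inet6addr_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Rather than restructuring the locking on the notifier path, the patch simply deletes the RoCEv2 walk from irdma_cm_teardown_connections, since only iWARP needs this teardown.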
parent 5e8afb87
@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
         struct irdma_cm_node *cm_node;
         struct list_head teardown_list;
         struct ib_qp_attr attr;
-        struct irdma_sc_vsi *vsi = &iwdev->vsi;
-        struct irdma_sc_qp *sc_qp;
-        struct irdma_qp *qp;
-        int i;
 
         INIT_LIST_HEAD(&teardown_list);
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
                 irdma_cm_disconn(cm_node->iwqp);
                 irdma_rem_ref_cm_node(cm_node);
         }
-
-        if (!iwdev->roce_mode)
-                return;
-        INIT_LIST_HEAD(&teardown_list);
-        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-                mutex_lock(&vsi->qos[i].qos_mutex);
-                list_for_each_safe (list_node, list_core_temp,
-                                    &vsi->qos[i].qplist) {
-                        u32 qp_ip[4];
-
-                        sc_qp = container_of(list_node, struct irdma_sc_qp,
-                                             list);
-                        if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
-                                continue;
-
-                        qp = sc_qp->qp_uk.back_qp;
-                        if (!disconnect_all) {
-                                if (nfo->ipv4)
-                                        qp_ip[0] = qp->udp_info.local_ipaddr[3];
-                                else
-                                        memcpy(qp_ip,
-                                               &qp->udp_info.local_ipaddr[0],
-                                               sizeof(qp_ip));
-                        }
-
-                        if (disconnect_all ||
-                            (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
-                             !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
-                                spin_lock(&iwdev->rf->qptable_lock);
-                                if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
-                                        irdma_qp_add_ref(&qp->ibqp);
-                                        list_add(&qp->teardown_entry,
-                                                 &teardown_list);
-                                }
-                                spin_unlock(&iwdev->rf->qptable_lock);
-                        }
-                }
-                mutex_unlock(&vsi->qos[i].qos_mutex);
-        }
-
-        list_for_each_safe (list_node, list_core_temp, &teardown_list) {
-                qp = container_of(list_node, struct irdma_qp, teardown_entry);
-                attr.qp_state = IB_QPS_ERR;
-                irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
-                irdma_qp_rem_ref(&qp->ibqp);
-        }
 }
 
 /**