Commit 2305d686 authored by Jason Gunthorpe

RDMA/cm: Make the destroy_id flow more robust

Too much of the destruction is very carefully sensitive to the state
and various other things. Move more code to the unconditional path and
add several WARN_ONs to check consistency.

Link: https://lore.kernel.org/r/20200310092545.251365-5-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent bede86a3
...@@ -825,6 +825,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, ...@@ -825,6 +825,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
cm_id_priv->id.context = context; cm_id_priv->id.context = context;
cm_id_priv->id.remote_cm_qpn = 1; cm_id_priv->id.remote_cm_qpn = 1;
RB_CLEAR_NODE(&cm_id_priv->service_node);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
spin_lock_init(&cm_id_priv->lock); spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp); init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list); INIT_LIST_HEAD(&cm_id_priv->work_list);
...@@ -982,11 +984,13 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err) ...@@ -982,11 +984,13 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
spin_lock_irq(&cm.lock); spin_lock_irq(&cm.lock);
if (--cm_id_priv->listen_sharecount > 0) { if (--cm_id_priv->listen_sharecount > 0) {
/* The id is still shared. */ /* The id is still shared. */
WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
cm_deref_id(cm_id_priv); cm_deref_id(cm_id_priv);
spin_unlock_irq(&cm.lock); spin_unlock_irq(&cm.lock);
return; return;
} }
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
RB_CLEAR_NODE(&cm_id_priv->service_node);
spin_unlock_irq(&cm.lock); spin_unlock_irq(&cm.lock);
break; break;
case IB_CM_SIDR_REQ_SENT: case IB_CM_SIDR_REQ_SENT:
...@@ -997,11 +1001,6 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err) ...@@ -997,11 +1001,6 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
case IB_CM_SIDR_REQ_RCVD: case IB_CM_SIDR_REQ_RCVD:
spin_unlock_irq(&cm_id_priv->lock); spin_unlock_irq(&cm_id_priv->lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
rb_erase(&cm_id_priv->sidr_id_node,
&cm.remote_sidr_table);
spin_unlock_irq(&cm.lock);
break; break;
case IB_CM_REQ_SENT: case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REQ_RCVD:
...@@ -1068,6 +1067,10 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err) ...@@ -1068,6 +1067,10 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
if (!list_empty(&cm_id_priv->prim_list) && if (!list_empty(&cm_id_priv->prim_list) &&
(!cm_id_priv->prim_send_port_not_ready)) (!cm_id_priv->prim_send_port_not_ready))
list_del(&cm_id_priv->prim_list); list_del(&cm_id_priv->prim_list);
WARN_ON(cm_id_priv->listen_sharecount);
WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
spin_unlock(&cm.lock); spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock); spin_unlock_irq(&cm_id_priv->lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment