Commit e8dc4e88 authored by Jason Gunthorpe

RDMA/cm: Fix ordering of xa_alloc_cyclic() in ib_create_cm_id()

xa_alloc_cyclic() is an SMP release that must pair with a later acquire, the
xa_load() performed as part of cm_acquire_id().

As such, xa_alloc_cyclic() must be done after the cm_id is fully
initialized; in particular, it absolutely must come after the
refcount_set(), otherwise the refcount_inc() in cm_acquire_id() may not
observe the initialized refcount.

Because there are several cases where a reader can use the id.local_id
after cm_acquire_id() while the cm_id is still in the IB_CM_IDLE state,
the allocation unfortunately has to be split into an allocation of a NULL
entry followed by a finalizing xa_store.
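
To illustrate that reserve-then-publish pattern outside of cm.c, here is a
minimal, self-contained sketch using the xarray API. The my_obj/my_xa names
are made up for illustration and are not the driver code: reserve the index
with a NULL entry so concurrent lookups simply miss, finish initializing the
object (including its refcount), then publish it with xa_store(), whose
release semantics pair with the readers' xa_load() acquire:

#include <linux/xarray.h>
#include <linux/refcount.h>

struct my_obj {
	u32 id;
	refcount_t refcount;
};

static DEFINE_XARRAY_ALLOC(my_xa);

static int my_obj_publish(struct my_obj *obj)
{
	u32 id;
	int err;

	/* Reserve an index with a NULL entry; concurrent xa_load() callers
	 * see "not present" instead of a half-initialized object. */
	err = xa_alloc(&my_xa, &id, NULL, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	obj->id = id;
	refcount_set(&obj->refcount, 1);	/* finish initialization first */

	/* Publish: the store's release pairs with the acquire in a reader's
	 * xa_load(), so the refcount_set() above is guaranteed visible. */
	xa_store(&my_xa, id, obj, GFP_KERNEL);
	return 0;
}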

Fixes: a977049d ("[PATCH] IB: Add the kernel CM implementation")
Link: https://lore.kernel.org/r/20200310092545.251365-2-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 9e57a9aa
@@ -571,18 +571,6 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
 	return 0;
 }
 
-static int cm_alloc_id(struct cm_id_private *cm_id_priv)
-{
-	int err;
-	u32 id;
-
-	err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
-			xa_limit_32b, &cm.local_id_next, GFP_KERNEL);
-
-	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
-	return err;
-}
-
 static u32 cm_local_id(__be32 local_id)
 {
 	return (__force u32) (local_id ^ cm.random_id_operand);
@@ -824,6 +812,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 				 void *context)
 {
 	struct cm_id_private *cm_id_priv;
+	u32 id;
 	int ret;
 
 	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
@@ -835,9 +824,6 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	cm_id_priv->id.cm_handler = cm_handler;
 	cm_id_priv->id.context = context;
 	cm_id_priv->id.remote_cm_qpn = 1;
-	ret = cm_alloc_id(cm_id_priv);
-	if (ret)
-		goto error;
 
 	spin_lock_init(&cm_id_priv->lock);
 	init_completion(&cm_id_priv->comp);
@@ -846,11 +832,20 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	INIT_LIST_HEAD(&cm_id_priv->altr_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	refcount_set(&cm_id_priv->refcount, 1);
+
+	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
+				  &cm.local_id_next, GFP_KERNEL);
+	if (ret)
+		goto error;
+	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
+	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+		     cm_id_priv, GFP_KERNEL);
+
 	return &cm_id_priv->id;
 
 error:
 	kfree(cm_id_priv);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(ib_create_cm_id);