Commit d490ee52 authored by Jason Gunthorpe

RDMA/cma: Fix locking for the RDMA_CM_LISTEN state

There is a strange unlocked read of the ID state when checking for
reuseaddr. This is because an ID cannot be reusable once it becomes a
listening ID. Instead of using the state to exclude reuse, just clear the
reuseaddr flag as part of rdma_listen()'s flow to convert reusable into
not reusable.
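Sketched out, the conversion in rdma_listen() is (condensed from the hunk
below):

	if (id_priv->reuseaddr) {
		mutex_lock(&lock);
		/* The port must now be unique in the bind list, so check it
		 * with reuseaddr forced off.
		 */
		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
		if (!ret)
			id_priv->reuseaddr = 0;	/* listening IDs are never reusable */
		mutex_unlock(&lock);
		if (ret)
			goto err;
	}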

Once an ID goes to listen there is no way back out, and the only use of
reusable is in the bind_list check.
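In the bind_list walk that means the state test simply disappears (sketch
of the cma_check_port() hunk below; the surrounding hlist walk is assumed
from context, not part of this diff):

	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		/* reuseaddr can only still be set on not-yet-listening IDs */
		if (reuseaddr && cur_id->reuseaddr)
			continue;
		...
	}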

Finally, update the checks under handler_mutex to use READ_ONCE and audit
that once RDMA_CM_LISTEN is observed in a req callback it is stable under
the handler_mutex.
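The annotated check, as it now appears in both req handlers (taken from
the hunks below):

	mutex_lock(&listen_id->handler_mutex);
	/* The state is updated under id_priv->lock, not handler_mutex, so
	 * the lockless read is marked READ_ONCE; once RDMA_CM_LISTEN is
	 * observed here it cannot change while handler_mutex is held.
	 */
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
		goto err_unlock;
	}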

Link: https://lore.kernel.org/r/20200902081122.745412-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 732d41c5
@@ -2195,7 +2195,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
 	}
 
 	mutex_lock(&listen_id->handler_mutex);
-	if (listen_id->state != RDMA_CM_LISTEN) {
+	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
 		ret = -ECONNABORTED;
 		goto err_unlock;
 	}
@@ -2373,7 +2373,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 
 	listen_id = cm_id->context;
 	mutex_lock(&listen_id->handler_mutex);
-	if (listen_id->state != RDMA_CM_LISTEN)
+	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
 		goto out;
 
 	/* Create a new RDMA id for the new IW CM ID */
@@ -3327,7 +3327,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	spin_lock_irqsave(&id_priv->lock, flags);
-	if (reuse || id_priv->state == RDMA_CM_IDLE) {
+	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
+	    id_priv->state == RDMA_CM_IDLE) {
 		id_priv->reuseaddr = reuse;
 		ret = 0;
 	} else {
@@ -3521,8 +3522,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
-		    cur_id->reuseaddr)
+		if (reuseaddr && cur_id->reuseaddr)
 			continue;
 
 		cur_addr = cma_src_addr(cur_id);
@@ -3563,18 +3563,6 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
 	return ret;
 }
 
-static int cma_bind_listen(struct rdma_id_private *id_priv)
-{
-	struct rdma_bind_list *bind_list = id_priv->bind_list;
-	int ret = 0;
-
-	mutex_lock(&lock);
-	if (bind_list->owners.first->next)
-		ret = cma_check_port(bind_list, id_priv, 0);
-	mutex_unlock(&lock);
-	return ret;
-}
-
 static enum rdma_ucm_port_space
 cma_select_inet_ps(struct rdma_id_private *id_priv)
 {
@@ -3683,8 +3671,16 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 			return -EINVAL;
 	}
 
+	/*
+	 * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable
+	 * any more, and has to be unique in the bind list.
+	 */
 	if (id_priv->reuseaddr) {
-		ret = cma_bind_listen(id_priv);
+		mutex_lock(&lock);
+		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
+		if (!ret)
+			id_priv->reuseaddr = 0;
+		mutex_unlock(&lock);
 		if (ret)
 			goto err;
 	}
@@ -3709,6 +3705,10 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	return 0;
 err:
 	id_priv->backlog = 0;
+	/*
+	 * All the failure paths that lead here will not allow the req_handler's
+	 * to have run.
+	 */
 	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
 	return ret;
 }
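For context, cma_comp_exch() on the err path rolls the state back from
RDMA_CM_LISTEN to RDMA_CM_ADDR_BOUND only if it is still LISTEN. A minimal
sketch of its assumed shape (illustrative only; the helper is not part of
this diff):

	static int cma_comp_exch(struct rdma_id_private *id_priv,
				 enum rdma_cm_state comp, enum rdma_cm_state exch)
	{
		unsigned long flags;
		int ret;

		/* Compare-and-exchange the FSM state under the spinlock so
		 * readers never observe a torn transition.
		 */
		spin_lock_irqsave(&id_priv->lock, flags);
		ret = (id_priv->state == comp);
		if (ret)
			id_priv->state = exch;
		spin_unlock_irqrestore(&id_priv->lock, flags);
		return ret;
	}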