Commit 60dff56d authored by Weihang Li, committed by Jason Gunthorpe

RDMA/core: Use refcount_t instead of atomic_t on refcount of iwcm_id_private

The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-2-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 61c7d826
...@@ -211,8 +211,7 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv) ...@@ -211,8 +211,7 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
*/ */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv) static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{ {
BUG_ON(atomic_read(&cm_id_priv->refcount)==0); if (refcount_dec_and_test(&cm_id_priv->refcount)) {
if (atomic_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list)); BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv); free_cm_id(cm_id_priv);
return 1; return 1;
...@@ -225,7 +224,7 @@ static void add_ref(struct iw_cm_id *cm_id) ...@@ -225,7 +224,7 @@ static void add_ref(struct iw_cm_id *cm_id)
{ {
struct iwcm_id_private *cm_id_priv; struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
atomic_inc(&cm_id_priv->refcount); refcount_inc(&cm_id_priv->refcount);
} }
static void rem_ref(struct iw_cm_id *cm_id) static void rem_ref(struct iw_cm_id *cm_id)
...@@ -257,7 +256,7 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device, ...@@ -257,7 +256,7 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
cm_id_priv->id.add_ref = add_ref; cm_id_priv->id.add_ref = add_ref;
cm_id_priv->id.rem_ref = rem_ref; cm_id_priv->id.rem_ref = rem_ref;
spin_lock_init(&cm_id_priv->lock); spin_lock_init(&cm_id_priv->lock);
atomic_set(&cm_id_priv->refcount, 1); refcount_set(&cm_id_priv->refcount, 1);
init_waitqueue_head(&cm_id_priv->connect_wait); init_waitqueue_head(&cm_id_priv->connect_wait);
init_completion(&cm_id_priv->destroy_comp); init_completion(&cm_id_priv->destroy_comp);
INIT_LIST_HEAD(&cm_id_priv->work_list); INIT_LIST_HEAD(&cm_id_priv->work_list);
...@@ -1094,7 +1093,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id, ...@@ -1094,7 +1093,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
} }
} }
atomic_inc(&cm_id_priv->refcount); refcount_inc(&cm_id_priv->refcount);
if (list_empty(&cm_id_priv->work_list)) { if (list_empty(&cm_id_priv->work_list)) {
list_add_tail(&work->list, &cm_id_priv->work_list); list_add_tail(&work->list, &cm_id_priv->work_list);
queue_work(iwcm_wq, &work->work); queue_work(iwcm_wq, &work->work);
......
...@@ -52,7 +52,7 @@ struct iwcm_id_private { ...@@ -52,7 +52,7 @@ struct iwcm_id_private {
wait_queue_head_t connect_wait; wait_queue_head_t connect_wait;
struct list_head work_list; struct list_head work_list;
spinlock_t lock; spinlock_t lock;
atomic_t refcount; refcount_t refcount;
struct list_head work_free_list; struct list_head work_free_list;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment