Commit a942ec27 authored by Chengchang Tang, committed by Leon Romanovsky

RDMA/hns: Fix UAF for cq async event

The CQ refcount is not protected by any lock. When a CQ asynchronous
event races with CQ destruction, the CQ may already have been released,
causing a use-after-free (UAF).

Use xa_lock() to protect the CQ refcount.

Fixes: 9a443537 ("IB/hns: Add driver files for hns RoCE driver")
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240412091616.370789-6-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent b46494b6
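
For context, the pattern the patch introduces can be summarized in a minimal sketch (condensed from the diff below; the standalone helper get_cq() and its arguments are illustrative, not the driver's actual code):

#include <linux/refcount.h>
#include <linux/xarray.h>

/*
 * Minimal sketch of the locked lookup-and-get pattern adopted by the patch.
 * Before the fix, xa_load() and refcount_inc() ran without the xarray lock,
 * so a concurrent destroy could erase the entry and free the CQ between the
 * lookup and the refcount bump, leaving the event path with a dangling
 * pointer (UAF). With the lock held across both steps, the event path either
 * pins the CQ while it is still registered or observes NULL.
 */
static struct hns_roce_cq *get_cq(struct xarray *array, u32 cqn)
{
	struct hns_roce_cq *hr_cq;

	xa_lock(array);
	hr_cq = xa_load(array, cqn);
	if (hr_cq)
		refcount_inc(&hr_cq->refcount);	/* pin under the lock */
	xa_unlock(array);

	return hr_cq;	/* NULL if the CQ is already being torn down */
}
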
@@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 		return ret;
 	}
 
-	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+	ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
 	if (ret) {
 		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
 		goto err_put;
@@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	return 0;
 
 err_xa:
-	xa_erase(&cq_table->array, hr_cq->cqn);
+	xa_erase_irq(&cq_table->array, hr_cq->cqn);
 
 err_put:
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
@@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);
 
-	xa_erase(&cq_table->array, hr_cq->cqn);
+	xa_erase_irq(&cq_table->array, hr_cq->cqn);
 
 	/* Waiting interrupt process procedure carried out */
 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
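
Side note on the _irq conversions in the three hunks above: the async-event path (next hunks) now takes the xarray lock and, judging by the synchronize_irq() call, can run from the EQ interrupt, so the process-context writers switch to xa_store_irq()/xa_erase_irq(), which take the same lock with local interrupts disabled. A sketch of what that amounts to, per the xarray API conventions (illustrative helper, not driver code):

#include <linux/xarray.h>

static void *store_cq_irqsafe(struct xarray *array, unsigned long cqn,
			      void *hr_cq)
{
	void *curr;

	/*
	 * Equivalent, per the xarray API, to what xa_store_irq() does:
	 * update under the array's spinlock with interrupts disabled, so an
	 * interrupt handler taking xa_lock() on this CPU cannot deadlock
	 * against a process-context writer.
	 */
	xa_lock_irq(array);
	curr = __xa_store(array, cqn, hr_cq, GFP_KERNEL);
	xa_unlock_irq(array);

	return curr;	/* old entry, or an xa_err() value on failure */
}
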
@@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 	struct ib_event event;
 	struct ib_cq *ibcq;
 
-	hr_cq = xa_load(&hr_dev->cq_table.array,
-			cqn & (hr_dev->caps.num_cqs - 1));
-	if (!hr_cq) {
-		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
-		return;
-	}
-
 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 		return;
 	}
 
-	refcount_inc(&hr_cq->refcount);
+	xa_lock(&hr_dev->cq_table.array);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (hr_cq)
+		refcount_inc(&hr_cq->refcount);
+	xa_unlock(&hr_dev->cq_table.array);
+	if (!hr_cq) {
+		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+		return;
+	}
 
 	ibcq = &hr_cq->ib_cq;
 	if (ibcq->event_handler) {
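
One piece not visible in this diff: the reference taken under xa_lock() in the event path must be dropped once the event has been delivered, and the destroy path must wait for any in-flight event handler before the CQ is freed. A hedged sketch of that put side, assuming the common refcount-plus-completion pattern (the completion field name `free` and the exact shape are assumptions, not taken from this diff):

static void put_cq(struct hns_roce_cq *hr_cq)
{
	/*
	 * The last user wakes the destroy path, which is assumed to block in
	 * wait_for_completion() until no event handler still holds a
	 * reference; only then is the CQ memory released.
	 */
	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}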