Commit 736b5a70 authored by Matthew Wilcox, committed by Jason Gunthorpe

RDMA/hns: Convert qp_table_tree to XArray

Also fully initialise the qp before storing it in the XArray.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 27e19f45
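
For readers new to the XArray API, the sketch below shows the conversion pattern this commit applies. The demo_* names are hypothetical, not driver code: the XArray embeds its own spinlock, so the external lock disappears, and xa_store_irq()/xa_load() replace the radix tree insert/lookup calls.

/*
 * Minimal sketch of the radix-tree-to-XArray conversion pattern;
 * the demo_* names are illustrative and not part of the hns driver.
 */
#include <linux/xarray.h>

struct demo_dev {
	struct xarray obj_xa;	/* replaces radix_tree_root + external spinlock_t */
};

static void demo_init(struct demo_dev *dev)
{
	xa_init(&dev->obj_xa);
}

static int demo_store(struct demo_dev *dev, unsigned long id, void *obj)
{
	/*
	 * xa_store_irq() takes the XArray's internal lock with interrupts
	 * disabled; xa_err() converts the returned entry into 0 or -errno.
	 */
	return xa_err(xa_store_irq(&dev->obj_xa, id, obj, GFP_KERNEL));
}

static void *demo_lookup(struct demo_dev *dev, unsigned long id)
{
	/* xa_load() needs no external locking; it is RCU-safe. */
	return xa_load(&dev->obj_xa, id);
}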
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -505,7 +505,6 @@ struct hns_roce_uar_table {
 struct hns_roce_qp_table {
 	struct hns_roce_bitmap		bitmap;
-	spinlock_t			lock;
 	struct hns_roce_hem_table	qp_table;
 	struct hns_roce_hem_table	irrl_table;
 	struct hns_roce_hem_table	trrl_table;
@@ -955,7 +954,7 @@ struct hns_roce_dev {
 	int			irq[HNS_ROCE_MAX_IRQ_NUM];
 	u8 __iomem		*reg_base;
 	struct hns_roce_caps	caps;
-	struct radix_tree_root	qp_table_tree;
+	struct xarray		qp_table_xa;
 	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
 	u64			sys_image_guid;
@@ -1045,8 +1044,7 @@ static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
 static inline struct hns_roce_qp
 	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
 {
-	return radix_tree_lookup(&hr_dev->qp_table_tree,
-				 qpn & (hr_dev->caps.num_qps - 1));
+	return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
 }
 
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
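
The standalone spinlock_t member deleted in the first hunk is subsumed by the XArray itself, which carries an internal lock (abridged from include/linux/xarray.h):

/* Abridged from include/linux/xarray.h: the XArray embeds its own lock,
 * which is why the driver's separate qp_table spinlock is deleted above. */
struct xarray {
	spinlock_t	xa_lock;
	gfp_t		xa_flags;
	void __rcu	*xa_head;
};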
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -45,17 +45,14 @@
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
-	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 	struct device *dev = hr_dev->dev;
 	struct hns_roce_qp *qp;
 
-	spin_lock(&qp_table->lock);
+	xa_lock(&hr_dev->qp_table_xa);
 	qp = __hns_roce_qp_lookup(hr_dev, qpn);
 	if (qp)
 		atomic_inc(&qp->refcount);
-
-	spin_unlock(&qp_table->lock);
+	xa_unlock(&hr_dev->qp_table_xa);
 
 	if (!qp) {
 		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
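
The event path takes xa_lock() so that the refcount increment cannot race with __xa_erase() in hns_roce_qp_remove() below. A generic sketch of this lookup-and-pin idiom, with illustrative demo_* names:

#include <linux/atomic.h>
#include <linux/xarray.h>

struct demo_obj {
	atomic_t refcount;
};

static struct demo_obj *demo_get(struct xarray *xa, unsigned long id)
{
	struct demo_obj *obj;

	xa_lock(xa);		/* excludes __xa_erase() by a concurrent remover */
	obj = xa_load(xa, id);
	if (obj)
		atomic_inc(&obj->refcount);	/* pin before dropping the lock */
	xa_unlock(xa);

	return obj;
}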
@@ -147,29 +144,20 @@ EXPORT_SYMBOL_GPL(to_hns_roce_state);
 static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
 				 struct hns_roce_qp *hr_qp)
 {
-	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+	struct xarray *xa = &hr_dev->qp_table_xa;
 	int ret;
 
 	if (!qpn)
 		return -EINVAL;
 
 	hr_qp->qpn = qpn;
-
-	spin_lock_irq(&qp_table->lock);
-	ret = radix_tree_insert(&hr_dev->qp_table_tree,
-				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
-	spin_unlock_irq(&qp_table->lock);
-	if (ret) {
-		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
-		goto err_put_irrl;
-	}
-
 	atomic_set(&hr_qp->refcount, 1);
 	init_completion(&hr_qp->free);
 
-	return 0;
-
-err_put_irrl:
+	ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
+				  hr_qp, GFP_KERNEL));
+	if (ret)
+		dev_err(hr_dev->dev, "QPC xa_store failed\n");
 
 	return ret;
 }
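
This hunk also carries the "fully initialise the qp before storing it" fix from the commit message: the refcount and completion are now set up before the entry is published, so a concurrent __hns_roce_qp_lookup() can no longer observe a half-initialised qp. An illustrative ordering sketch (hypothetical names):

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/xarray.h>

struct demo_obj {
	atomic_t refcount;
	struct completion free;
};

static int demo_install(struct xarray *xa, unsigned long id,
			struct demo_obj *obj)
{
	atomic_set(&obj->refcount, 1);	/* fully initialise first... */
	init_completion(&obj->free);

	/* ...then publish, making the object visible to concurrent xa_load() */
	return xa_err(xa_store_irq(xa, id, obj, GFP_KERNEL));
}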
@@ -220,17 +208,9 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
 		}
 	}
 
-	spin_lock_irq(&qp_table->lock);
-	ret = radix_tree_insert(&hr_dev->qp_table_tree,
-				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
-	spin_unlock_irq(&qp_table->lock);
-	if (ret) {
-		dev_err(dev, "QPC radix_tree_insert failed\n");
+	ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
+	if (ret)
 		goto err_put_sccc;
-	}
-
-	atomic_set(&hr_qp->refcount, 1);
-	init_completion(&hr_qp->free);
 
 	return 0;
...@@ -255,13 +235,12 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, ...@@ -255,13 +235,12 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{ {
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; struct xarray *xa = &hr_dev->qp_table_xa;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&qp_table->lock, flags); xa_lock_irqsave(xa, flags);
radix_tree_delete(&hr_dev->qp_table_tree, __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
hr_qp->qpn & (hr_dev->caps.num_qps - 1)); xa_unlock_irqrestore(xa, flags);
spin_unlock_irqrestore(&qp_table->lock, flags);
} }
EXPORT_SYMBOL_GPL(hns_roce_qp_remove); EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
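
xarray.h provides xa_erase_irq() but appears to offer no irqsave wrapper, hence the explicit xa_lock_irqsave() around __xa_erase(), the variant that assumes the lock is already held. Erasing never allocates memory, so it cannot fail. A sketch of the same pattern (illustrative name):

#include <linux/xarray.h>

static void demo_remove(struct xarray *xa, unsigned long id)
{
	unsigned long flags;

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, id);	/* lock held by caller; never allocates */
	xa_unlock_irqrestore(xa, flags);
}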
@@ -1157,8 +1136,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	int ret;
 
 	mutex_init(&qp_table->scc_mutex);
-	spin_lock_init(&qp_table->lock);
-	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+	xa_init(&hr_dev->qp_table_xa);
 
 	/* In hw v1, a port include two SQP, six ports total 12 */
 	if (hr_dev->caps.max_sq_sg <= 2)
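
One consequence of this last hunk: INIT_RADIX_TREE() fixed GFP_ATOMIC for the lifetime of the tree, whereas the XArray takes allocation flags per call, which is what allows the GFP_KERNEL store in hns_roce_gsi_qp_alloc() above. An illustrative sketch (hypothetical name):

#include <linux/xarray.h>

/*
 * Illustrative only: no GFP mask is baked in at init time; each store
 * chooses its own flags, so process context can use GFP_KERNEL.
 */
static int demo_setup(struct xarray *xa, void *first)
{
	xa_init(xa);
	return xa_err(xa_store(xa, 0, first, GFP_KERNEL));
}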