Commit 7f3969b1 authored by Chengchang Tang, committed by Jason Gunthorpe

RDMA/hns: Fix base address table allocation

For hns, the capacity of entry-like resources (e.g. WQE/CQE/EQE) depends on
the BT page size, the buf page size and the hopnum. In user mode, the buf
page size is decided by UMEM, so the actual capacity is controlled only by
the BT page size and the hopnum.
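
To make the relation concrete, here is a minimal user-space sketch (not
part of the patch): it assumes 8-byte base address entries, i.e. the
driver's BA_BYTE_LEN, and max_buf_pages() is a hypothetical helper. A BT
page of bt_pg_sz bytes holds bt_pg_sz / 8 entries per level, so a table
of depth hopnum can address (bt_pg_sz / 8)^hopnum buf pages.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: upper bound on buf pages addressable by one BT. */
static uint64_t max_buf_pages(uint64_t bt_pg_sz, unsigned int hopnum)
{
	uint64_t ba_per_bt = bt_pg_sz / 8;	/* 8-byte BAs per BT page */
	uint64_t pages = 1;
	unsigned int i;

	for (i = 0; i < hopnum; i++)
		pages *= ba_per_bt;

	return pages;
}

int main(void)
{
	/* e.g. a 4KB BT page with hopnum = 2 covers 512 * 512 = 262144 pages */
	printf("%llu\n", (unsigned long long)max_buf_pages(4096, 2));
	return 0;
}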

Currently, the BT page size and hopnum are both obtained from firmware.
This makes the driver inflexible and introduces unnecessary constraints,
which lead to resource allocation failures in many scenarios.

Before allocating a BT, this patch calculates whether the BT page size set
by firmware is sufficient and, if it is not, increases the BT page size.
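
As a hypothetical worked example of the check (the numbers are
illustrative, not from the patch): with a 4KB BT page and 8-byte base
addresses, one BT holds 512 entries. For a region of 100000 buf pages at
hopnum = 2, each level-1 BT covers 512 pages, so
DIV_ROUND_UP(100000, 512) = 196 top-level entries are needed; 196 <= 512,
so 4KB is sufficient. If the total exceeded 512, the next larger supported
page size would be tried.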

Fixes: 11334014 ("RDMA/hns: Optimize base address table config flow for qp buffer")
Link: https://lore.kernel.org/r/20230512092245.344442-3-huangjunxian6@hisilicon.com
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 58caa2a5
@@ -33,6 +33,7 @@
 #include <linux/vmalloc.h>
 #include <rdma/ib_umem.h>
+#include <linux/math.h>
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
@@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
 	return page_cnt;
 }
 
+static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
+{
+	return int_pow(ba_per_bt, hopnum - 1);
+}
+
+static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
+				      struct hns_roce_mtr *mtr,
+				      unsigned int pg_shift)
+{
+	unsigned long cap = hr_dev->caps.page_size_cap;
+	struct hns_roce_buf_region *re;
+	unsigned int pgs_per_l1ba;
+	unsigned int ba_per_bt;
+	unsigned int ba_num;
+	int i;
+
+	for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
+		if (!(BIT(pg_shift) & cap))
+			continue;
+
+		ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
+		ba_num = 0;
+		for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+			re = &mtr->hem_cfg.region[i];
+			if (re->hopnum == 0)
+				continue;
+
+			pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
+			ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
+		}
+
+		if (ba_num <= ba_per_bt)
+			return pg_shift;
+	}
+
+	return 0;
+}
+
 static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 			 unsigned int ba_page_shift)
 {
@@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	hns_roce_hem_list_init(&mtr->hem_list);
 
 	if (!cfg->is_direct) {
+		ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
+		if (!ba_page_shift)
+			return -ERANGE;
+
 		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
 						cfg->region, cfg->region_count,
 						ba_page_shift);
...