Commit 60c3becf authored by Xi Wang's avatar Xi Wang Committed by Jason Gunthorpe

RDMA/hns: Fix sg offset non-zero issue

When run perftest in many times, the system will report a BUG as follows:

   BUG: Bad rss-counter state mm:(____ptrval____) idx:0 val:-1
   BUG: Bad rss-counter state mm:(____ptrval____) idx:1 val:1

We tested with different kernel versions and found it started from the
following commit:

commit d10bcf94 ("RDMA/umem: Combine contiguous PAGE_SIZE regions in
SGEs")

In this commit, the sg->offset is always 0 when sg_set_page() is called in
ib_umem_get() and the drivers are not allowed to change the sgl, otherwise
it will get bad page descriptor when unfolding SGEs in __ib_umem_release()
as sg_page_count() will get wrong result while sgl->offset is not 0.

However, there is a weird sgl usage in the current hns driver, the driver
modified sg->offset after calling ib_umem_get(), which caused us to iterate
past the wrong number of pages in the for_each_sg_page iterator.

This patch fixes it by correcting the non-standard sgl usage found in the
hns_roce_db_map_user() function.

Fixes: d10bcf94 ("RDMA/umem: Combine contiguous PAGE_SIZE regions in SGEs")
Fixes: 0425e3e6 ("RDMA/hns: Support flush cqe for hip08 in kernel space")
Link: https://lore.kernel.org/r/1562808737-45723-1-git-send-email-oulijun@huawei.com
Signed-off-by: default avatarXi Wang <wangxi11@huawei.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent d5121ffe
...@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, ...@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt, struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db) struct hns_roce_db *db)
{ {
unsigned long page_addr = virt & PAGE_MASK;
struct hns_roce_user_db_page *page; struct hns_roce_user_db_page *page;
unsigned int offset;
int ret = 0; int ret = 0;
mutex_lock(&context->page_mutex); mutex_lock(&context->page_mutex);
list_for_each_entry(page, &context->page_list, list) list_for_each_entry(page, &context->page_list, list)
if (page->user_virt == (virt & PAGE_MASK)) if (page->user_virt == page_addr)
goto found; goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL); page = kmalloc(sizeof(*page), GFP_KERNEL);
...@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, ...@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
} }
refcount_set(&page->refcount, 1); refcount_set(&page->refcount, 1);
page->user_virt = (virt & PAGE_MASK); page->user_virt = page_addr;
page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0); page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) { if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem); ret = PTR_ERR(page->umem);
kfree(page); kfree(page);
...@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, ...@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
list_add(&page->list, &context->page_list); list_add(&page->list, &context->page_list);
found: found:
db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset = virt - page_addr;
(virt & ~PAGE_MASK); db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
db->virt_addr = sg_virt(page->umem->sg_head.sgl);
db->u.user_page = page; db->u.user_page = page;
refcount_inc(&page->refcount); refcount_inc(&page->refcount);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment