Commit eeb8461e authored by Yishai Hadas, committed by Roland Dreier

IB: Refactor umem to use linear SG table

This patch refactors the IB core umem code and vendor drivers to use a
linear (chained) SG table instead of chunk list.  With this change the
relevant code becomes clearer—no need for nested loops to build and
use umem.
Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent cfbf8d48
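
For orientation, a minimal before/after sketch of the iteration pattern this patch replaces, distilled from the ib_umem_page_count() change in the diff below; umem and shift are assumed to come from the enclosing function, and the snippet is illustrative rather than part of the patch itself.

	/* Before: walk the list of ib_umem_chunk structures, then index each chunk's page_list[]. */
	struct ib_umem_chunk *chunk;
	int i, n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (i = 0; i < chunk->nmap; ++i)
			n += sg_dma_len(&chunk->page_list[i]) >> shift;

	/* After: one pass over the linear SG table kept in umem->sg_head. */
	struct scatterlist *sg;
	int entry, n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
		n += sg_dma_len(sg) >> shift;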
@@ -42,29 +42,29 @@
 #include "uverbs.h"
 
-#define IB_UMEM_MAX_PAGE_CHUNK						\
-	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
-	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
-	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
-
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
-	struct ib_umem_chunk *chunk, *tmp;
+	struct scatterlist *sg;
+	struct page *page;
 	int i;
 
-	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-		ib_dma_unmap_sg(dev, chunk->page_list,
-				chunk->nents, DMA_BIDIRECTIONAL);
-		for (i = 0; i < chunk->nents; ++i) {
-			struct page *page = sg_page(&chunk->page_list[i]);
+	if (umem->nmap > 0)
+		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
+				umem->nmap,
+				DMA_BIDIRECTIONAL);
 
-			if (umem->writable && dirty)
-				set_page_dirty_lock(page);
-			put_page(page);
-		}
+	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 
-		kfree(chunk);
+		page = sg_page(sg);
+		if (umem->writable && dirty)
+			set_page_dirty_lock(page);
+		put_page(page);
 	}
+
+	sg_free_table(&umem->sg_head);
+	return;
+
 }
 
 /**
@@ -81,15 +81,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	struct ib_umem *umem;
 	struct page **page_list;
 	struct vm_area_struct **vma_list;
-	struct ib_umem_chunk *chunk;
 	unsigned long locked;
 	unsigned long lock_limit;
 	unsigned long cur_base;
 	unsigned long npages;
 	int ret;
-	int off;
 	int i;
 	DEFINE_DMA_ATTRS(attrs);
+	struct scatterlist *sg, *sg_list_start;
+	int need_release = 0;
 
 	if (dmasync)
 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -97,7 +97,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (!can_do_mlock())
 		return ERR_PTR(-EPERM);
 
-	umem = kmalloc(sizeof *umem, GFP_KERNEL);
+	umem = kzalloc(sizeof *umem, GFP_KERNEL);
 	if (!umem)
 		return ERR_PTR(-ENOMEM);
 
@@ -117,8 +117,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	/* We assume the memory is from hugetlb until proved otherwise */
 	umem->hugetlb   = 1;
 
-	INIT_LIST_HEAD(&umem->chunk_list);
-
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
 		kfree(umem);
@@ -147,7 +145,18 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	cur_base = addr & PAGE_MASK;
 
-	ret = 0;
+	if (npages == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	need_release = 1;
+	sg_list_start = umem->sg_head.sgl;
+
 	while (npages) {
 		ret = get_user_pages(current, current->mm, cur_base,
 				     min_t(unsigned long, npages,
@@ -157,54 +166,38 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		if (ret < 0)
 			goto out;
 
+		umem->npages += ret;
 		cur_base += ret * PAGE_SIZE;
 		npages   -= ret;
 
-		off = 0;
-
-		while (ret) {
-			chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
-					min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
-					GFP_KERNEL);
-			if (!chunk) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
-			sg_init_table(chunk->page_list, chunk->nents);
-			for (i = 0; i < chunk->nents; ++i) {
-				if (vma_list &&
-				    !is_vm_hugetlb_page(vma_list[i + off]))
-					umem->hugetlb = 0;
-				sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
-			}
-
-			chunk->nmap = ib_dma_map_sg_attrs(context->device,
-							  &chunk->page_list[0],
-							  chunk->nents,
-							  DMA_BIDIRECTIONAL,
-							  &attrs);
-			if (chunk->nmap <= 0) {
-				for (i = 0; i < chunk->nents; ++i)
-					put_page(sg_page(&chunk->page_list[i]));
-				kfree(chunk);
-
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			ret -= chunk->nents;
-			off += chunk->nents;
-			list_add_tail(&chunk->list, &umem->chunk_list);
+		for_each_sg(sg_list_start, sg, ret, i) {
+			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
+				umem->hugetlb = 0;
+
+			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
 		}
 
-		ret = 0;
+		/* preparing for next loop */
+		sg_list_start = sg;
 	}
 
+	umem->nmap = ib_dma_map_sg_attrs(context->device,
+				  umem->sg_head.sgl,
+				  umem->npages,
+				  DMA_BIDIRECTIONAL,
+				  &attrs);
+
+	if (umem->nmap <= 0) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = 0;
+
 out:
 	if (ret < 0) {
-		__ib_umem_release(context->device, umem, 0);
+		if (need_release)
+			__ib_umem_release(context->device, umem, 0);
 		kfree(umem);
 	} else
 		current->mm->pinned_vm = locked;
@@ -278,17 +271,16 @@ EXPORT_SYMBOL(ib_umem_release);
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	struct ib_umem_chunk *chunk;
 	int shift;
 	int i;
 	int n;
+	struct scatterlist *sg;
 
 	shift = ilog2(umem->page_size);
 
 	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (i = 0; i < chunk->nmap; ++i)
-			n += sg_dma_len(&chunk->page_list[i]) >> shift;
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
+		n += sg_dma_len(sg) >> shift;
 
 	return n;
 }
......
@@ -431,9 +431,9 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	u64 *pages;
 	u64 kva = 0;
 	int shift, n, len;
-	int i, j, k;
+	int i, k, entry;
 	int err = 0;
-	struct ib_umem_chunk *chunk;
+	struct scatterlist *sg;
 	struct c2_pd *c2pd = to_c2pd(pd);
 	struct c2_mr *c2mr;
@@ -452,10 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	shift = ffs(c2mr->umem->page_size) - 1;
-
-	n = 0;
-	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
-		n += chunk->nents;
+	n = c2mr->umem->nmap;
 
 	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
 	if (!pages) {
@@ -464,14 +461,12 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	i = 0;
-	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> shift;
-			for (k = 0; k < len; ++k) {
-				pages[i++] =
-					sg_dma_address(&chunk->page_list[j]) +
-					(c2mr->umem->page_size * k);
-			}
+	for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] =
+				sg_dma_address(sg) +
+				(c2mr->umem->page_size * k);
 		}
 	}
......
@@ -618,14 +618,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	__be64 *pages;
 	int shift, n, len;
-	int i, j, k;
+	int i, k, entry;
 	int err = 0;
-	struct ib_umem_chunk *chunk;
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
+	struct scatterlist *sg;
 
 	PDBG("%s ib_pd %p\n", __func__, pd);
 	php = to_iwch_pd(pd);
@@ -645,9 +644,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	shift = ffs(mhp->umem->page_size) - 1;
 
-	n = 0;
-	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-		n += chunk->nents;
+	n = mhp->umem->nmap;
 
 	err = iwch_alloc_pbl(mhp, n);
 	if (err)
@@ -661,12 +658,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	i = n = 0;
 
-	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> shift;
+	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+			len = sg_dma_len(sg) >> shift;
 			for (k = 0; k < len; ++k) {
-				pages[i++] = cpu_to_be64(sg_dma_address(
-					&chunk->page_list[j]) +
+				pages[i++] = cpu_to_be64(sg_dma_address(sg) +
 					mhp->umem->page_size * k);
 				if (i == PAGE_SIZE / sizeof *pages) {
 					err = iwch_write_pbl(mhp, pages, i, n);
@@ -676,7 +671,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 					i = 0;
 				}
 			}
-		}
+	}
 
 	if (i)
 		err = iwch_write_pbl(mhp, pages, i, n);
......
@@ -678,9 +678,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	__be64 *pages;
 	int shift, n, len;
-	int i, j, k;
+	int i, k, entry;
 	int err = 0;
-	struct ib_umem_chunk *chunk;
+	struct scatterlist *sg;
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
@@ -710,10 +710,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	shift = ffs(mhp->umem->page_size) - 1;
-
-	n = 0;
-	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-		n += chunk->nents;
+	n = mhp->umem->nmap;
 
 	err = alloc_pbl(mhp, n);
 	if (err)
 		goto err;
@@ -726,24 +723,22 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	i = n = 0;
 
-	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> shift;
-			for (k = 0; k < len; ++k) {
-				pages[i++] = cpu_to_be64(sg_dma_address(
-					&chunk->page_list[j]) +
-					mhp->umem->page_size * k);
-				if (i == PAGE_SIZE / sizeof *pages) {
-					err = write_pbl(&mhp->rhp->rdev,
-					      pages,
-					      mhp->attr.pbl_addr + (n << 3), i);
-					if (err)
-						goto pbl_done;
-					n += i;
-					i = 0;
-				}
-			}
-		}
+	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
+				mhp->umem->page_size * k);
+			if (i == PAGE_SIZE / sizeof *pages) {
+				err = write_pbl(&mhp->rhp->rdev,
+				      pages,
+				      mhp->attr.pbl_addr + (n << 3), i);
+				if (err)
+					goto pbl_done;
+				n += i;
+				i = 0;
+			}
+		}
+	}
 
 	if (i)
 		err = write_pbl(&mhp->rhp->rdev, pages,
......
@@ -322,7 +322,7 @@ struct ehca_mr_pginfo {
 	} phy;
 	struct { /* type EHCA_MR_PGI_USER section */
 		struct ib_umem *region;
-		struct ib_umem_chunk *next_chunk;
+		struct scatterlist *next_sg;
 		u64 next_nmap;
 	} usr;
 	struct { /* type EHCA_MR_PGI_FMR section */
......
@@ -400,10 +400,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	pginfo.num_hwpages = num_hwpages;
 	pginfo.u.usr.region = e_mr->umem;
 	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
-	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
-						     (&e_mr->umem->chunk_list),
-						     list);
+	pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
 
 	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
 			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
@@ -1858,61 +1855,39 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 				  u64 *kpage)
 {
 	int ret = 0;
-	struct ib_umem_chunk *prev_chunk;
-	struct ib_umem_chunk *chunk;
 	u64 pgaddr;
-	u32 i = 0;
 	u32 j = 0;
 	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
+	struct scatterlist **sg = &pginfo->u.usr.next_sg;
 
-	/* loop over desired chunk entries */
-	chunk      = pginfo->u.usr.next_chunk;
-	prev_chunk = pginfo->u.usr.next_chunk;
-	list_for_each_entry_continue(
-		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
-		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
-			pgaddr = page_to_pfn(sg_page(&chunk->page_list[i]))
-				<< PAGE_SHIFT ;
-			*kpage = pgaddr + (pginfo->next_hwpage *
-					   pginfo->hwpage_size);
-			if ( !(*kpage) ) {
-				ehca_gen_err("pgaddr=%llx "
-					     "chunk->page_list[i]=%llx "
-					     "i=%x next_hwpage=%llx",
-					     pgaddr, (u64)sg_dma_address(
-						     &chunk->page_list[i]),
-					     i, pginfo->next_hwpage);
-				return -EFAULT;
-			}
-			(pginfo->hwpage_cnt)++;
-			(pginfo->next_hwpage)++;
-			kpage++;
-			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
-				(pginfo->kpage_cnt)++;
-				(pginfo->u.usr.next_nmap)++;
-				pginfo->next_hwpage = 0;
-				i++;
-			}
-			j++;
-			if (j >= number) break;
-		}
-		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
-		    (j >= number)) {
-			pginfo->u.usr.next_nmap = 0;
-			prev_chunk = chunk;
-			break;
-		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
-			pginfo->u.usr.next_nmap = 0;
-			prev_chunk = chunk;
-		} else if (j >= number)
-			break;
-		else
-			prev_chunk = chunk;
-	}
-	pginfo->u.usr.next_chunk =
-		list_prepare_entry(prev_chunk,
-				   (&(pginfo->u.usr.region->chunk_list)),
-				   list);
+	while (*sg != NULL) {
+		pgaddr = page_to_pfn(sg_page(*sg))
+			<< PAGE_SHIFT;
+		*kpage = pgaddr + (pginfo->next_hwpage *
+				   pginfo->hwpage_size);
+		if (!(*kpage)) {
+			ehca_gen_err("pgaddr=%llx "
+				     "sg_dma_address=%llx "
+				     "entry=%llx next_hwpage=%llx",
+				     pgaddr, (u64)sg_dma_address(*sg),
+				     pginfo->u.usr.next_nmap,
+				     pginfo->next_hwpage);
+			return -EFAULT;
+		}
+		(pginfo->hwpage_cnt)++;
+		(pginfo->next_hwpage)++;
+		kpage++;
+		if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
+			(pginfo->kpage_cnt)++;
+			(pginfo->u.usr.next_nmap)++;
+			pginfo->next_hwpage = 0;
+			*sg = sg_next(*sg);
+		}
+		j++;
+		if (j >= number)
+			break;
+	}
+
 	return ret;
 }
@@ -1920,20 +1895,19 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
  * check given pages for contiguous layout
  * last page addr is returned in prev_pgaddr for further check
  */
-static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
-				     int start_idx, int end_idx,
+static int ehca_check_kpages_per_ate(struct scatterlist **sg,
+				     int num_pages,
 				     u64 *prev_pgaddr)
 {
-	int t;
-	for (t = start_idx; t <= end_idx; t++) {
-		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
+	for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
+		u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
 		if (ehca_debug_level >= 3)
 			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
 				     *(u64 *)__va(pgaddr));
 		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
 			ehca_gen_err("uncontiguous page found pgaddr=%llx "
-				     "prev_pgaddr=%llx page_list_i=%x",
-				     pgaddr, *prev_pgaddr, t);
+				     "prev_pgaddr=%llx entries_left_in_hwpage=%x",
+				     pgaddr, *prev_pgaddr, num_pages);
 			return -EINVAL;
 		}
 		*prev_pgaddr = pgaddr;
@@ -1947,111 +1921,80 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 				  u64 *kpage)
 {
 	int ret = 0;
-	struct ib_umem_chunk *prev_chunk;
-	struct ib_umem_chunk *chunk;
 	u64 pgaddr, prev_pgaddr;
-	u32 i = 0;
 	u32 j = 0;
 	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
 	int nr_kpages = kpages_per_hwpage;
+	struct scatterlist **sg = &pginfo->u.usr.next_sg;
 
-	/* loop over desired chunk entries */
-	chunk      = pginfo->u.usr.next_chunk;
-	prev_chunk = pginfo->u.usr.next_chunk;
-	list_for_each_entry_continue(
-		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
-		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
-			if (nr_kpages == kpages_per_hwpage) {
-				pgaddr = ( page_to_pfn(sg_page(&chunk->page_list[i]))
-					   << PAGE_SHIFT );
-				*kpage = pgaddr;
-				if ( !(*kpage) ) {
-					ehca_gen_err("pgaddr=%llx i=%x",
-						     pgaddr, i);
-					ret = -EFAULT;
-					return ret;
-				}
-				/*
-				 * The first page in a hwpage must be aligned;
-				 * the first MR page is exempt from this rule.
-				 */
-				if (pgaddr & (pginfo->hwpage_size - 1)) {
-					if (pginfo->hwpage_cnt) {
-						ehca_gen_err(
-							"invalid alignment "
-							"pgaddr=%llx i=%x "
-							"mr_pgsize=%llx",
-							pgaddr, i,
-							pginfo->hwpage_size);
-						ret = -EFAULT;
-						return ret;
-					}
-					/* first MR page */
-					pginfo->kpage_cnt =
-						(pgaddr &
-						 (pginfo->hwpage_size - 1)) >>
-						PAGE_SHIFT;
-					nr_kpages -= pginfo->kpage_cnt;
-					*kpage = pgaddr &
-						 ~(pginfo->hwpage_size - 1);
-				}
-				if (ehca_debug_level >= 3) {
-					u64 val = *(u64 *)__va(pgaddr);
-					ehca_gen_dbg("kpage=%llx chunk_page=%llx "
-						     "value=%016llx",
-						     *kpage, pgaddr, val);
-				}
-				prev_pgaddr = pgaddr;
-				i++;
-				pginfo->kpage_cnt++;
-				pginfo->u.usr.next_nmap++;
-				nr_kpages--;
-				if (!nr_kpages)
-					goto next_kpage;
-				continue;
-			}
-			if (i + nr_kpages > chunk->nmap) {
-				ret = ehca_check_kpages_per_ate(
-					chunk->page_list, i,
-					chunk->nmap - 1, &prev_pgaddr);
-				if (ret) return ret;
-				pginfo->kpage_cnt += chunk->nmap - i;
-				pginfo->u.usr.next_nmap += chunk->nmap - i;
-				nr_kpages -= chunk->nmap - i;
-				break;
-			}
-
-			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
-							i + nr_kpages - 1,
-							&prev_pgaddr);
-			if (ret) return ret;
-			i += nr_kpages;
-			pginfo->kpage_cnt += nr_kpages;
-			pginfo->u.usr.next_nmap += nr_kpages;
-next_kpage:
-			nr_kpages = kpages_per_hwpage;
-			(pginfo->hwpage_cnt)++;
-			kpage++;
-			j++;
-			if (j >= number) break;
-		}
-		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
-		    (j >= number)) {
-			pginfo->u.usr.next_nmap = 0;
-			prev_chunk = chunk;
-			break;
-		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
-			pginfo->u.usr.next_nmap = 0;
-			prev_chunk = chunk;
-		} else if (j >= number)
-			break;
-		else
-			prev_chunk = chunk;
-	}
-	pginfo->u.usr.next_chunk =
-		list_prepare_entry(prev_chunk,
-				   (&(pginfo->u.usr.region->chunk_list)),
-				   list);
+	while (*sg != NULL) {
+
+		if (nr_kpages == kpages_per_hwpage) {
+			pgaddr = (page_to_pfn(sg_page(*sg))
+				   << PAGE_SHIFT);
+			*kpage = pgaddr;
+			if (!(*kpage)) {
+				ehca_gen_err("pgaddr=%llx entry=%llx",
+					     pgaddr, pginfo->u.usr.next_nmap);
+				ret = -EFAULT;
+				return ret;
+			}
+			/*
+			 * The first page in a hwpage must be aligned;
+			 * the first MR page is exempt from this rule.
+			 */
+			if (pgaddr & (pginfo->hwpage_size - 1)) {
+				if (pginfo->hwpage_cnt) {
+					ehca_gen_err(
+						"invalid alignment "
+						"pgaddr=%llx entry=%llx "
+						"mr_pgsize=%llx",
+						pgaddr, pginfo->u.usr.next_nmap,
+						pginfo->hwpage_size);
+					ret = -EFAULT;
+					return ret;
+				}
+				/* first MR page */
+				pginfo->kpage_cnt =
+					(pgaddr &
+					 (pginfo->hwpage_size - 1)) >>
+					PAGE_SHIFT;
+				nr_kpages -= pginfo->kpage_cnt;
+				*kpage = pgaddr &
+					 ~(pginfo->hwpage_size - 1);
+			}
+			if (ehca_debug_level >= 3) {
+				u64 val = *(u64 *)__va(pgaddr);
+				ehca_gen_dbg("kpage=%llx page=%llx "
+					     "value=%016llx",
+					     *kpage, pgaddr, val);
+			}
+			prev_pgaddr = pgaddr;
+			*sg = sg_next(*sg);
+			pginfo->kpage_cnt++;
+			pginfo->u.usr.next_nmap++;
+			nr_kpages--;
+			if (!nr_kpages)
+				goto next_kpage;
+			continue;
+		}
+
+		ret = ehca_check_kpages_per_ate(sg, nr_kpages,
+						&prev_pgaddr);
+		if (ret)
+			return ret;
+		pginfo->kpage_cnt += nr_kpages;
+		pginfo->u.usr.next_nmap += nr_kpages;
+
+next_kpage:
+		nr_kpages = kpages_per_hwpage;
+		(pginfo->hwpage_cnt)++;
+		kpage++;
+		j++;
+		if (j >= number)
+			break;
+	}
+
 	return ret;
 }
......
@@ -188,8 +188,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct ipath_mr *mr;
 	struct ib_umem *umem;
-	struct ib_umem_chunk *chunk;
-	int n, m, i;
+	int n, m, entry;
+	struct scatterlist *sg;
 	struct ib_mr *ret;
 
 	if (length == 0) {
@@ -202,10 +202,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (IS_ERR(umem))
 		return (void *) umem;
 
-	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		n += chunk->nents;
-
+	n = umem->nmap;
 	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
 	if (!mr) {
 		ret = ERR_PTR(-ENOMEM);
@@ -224,22 +221,20 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	m = 0;
 	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list) {
-		for (i = 0; i < chunk->nents; i++) {
-			void *vaddr;
-
-			vaddr = page_address(sg_page(&chunk->page_list[i]));
-			if (!vaddr) {
-				ret = ERR_PTR(-EINVAL);
-				goto bail;
-			}
-			mr->mr.map[m]->segs[n].vaddr = vaddr;
-			mr->mr.map[m]->segs[n].length = umem->page_size;
-			n++;
-			if (n == IPATH_SEGSZ) {
-				m++;
-				n = 0;
-			}
-		}
-	}
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		void *vaddr;
+
+		vaddr = page_address(sg_page(sg));
+		if (!vaddr) {
+			ret = ERR_PTR(-EINVAL);
+			goto bail;
+		}
+		mr->mr.map[m]->segs[n].vaddr = vaddr;
+		mr->mr.map[m]->segs[n].length = umem->page_size;
+		n++;
+		if (n == IPATH_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
 	ret = &mr->ibmr;
......
@@ -45,7 +45,6 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
 			struct mlx4_db *db)
 {
 	struct mlx4_ib_user_db_page *page;
-	struct ib_umem_chunk *chunk;
 	int err = 0;
 
 	mutex_lock(&context->db_page_mutex);
@@ -73,8 +72,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
 	list_add(&page->list, &context->db_page_list);
 
 found:
-	chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
-	db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
 	db->u.user_page = page;
 	++page->refcnt;
......
@@ -90,11 +90,11 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 			   struct ib_umem *umem)
 {
 	u64 *pages;
-	struct ib_umem_chunk *chunk;
-	int i, j, k;
+	int i, k, entry;
 	int n;
 	int len;
 	int err = 0;
+	struct scatterlist *sg;
 
 	pages = (u64 *) __get_free_page(GFP_KERNEL);
 	if (!pages)
@@ -102,26 +102,25 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
 	i = n = 0;
 
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
-			for (k = 0; k < len; ++k) {
-				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
-					umem->page_size * k;
-				/*
-				 * Be friendly to mlx4_write_mtt() and
-				 * pass it chunks of appropriate size.
-				 */
-				if (i == PAGE_SIZE / sizeof (u64)) {
-					err = mlx4_write_mtt(dev->dev, mtt, n,
-							     i, pages);
-					if (err)
-						goto out;
-					n += i;
-					i = 0;
-				}
-			}
-		}
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> mtt->page_shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] = sg_dma_address(sg) +
+				umem->page_size * k;
+			/*
+			 * Be friendly to mlx4_write_mtt() and
+			 * pass it chunks of appropriate size.
+			 */
+			if (i == PAGE_SIZE / sizeof (u64)) {
+				err = mlx4_write_mtt(dev->dev, mtt, n,
+						     i, pages);
+				if (err)
+					goto out;
+				n += i;
+				i = 0;
+			}
+		}
+	}
 
 	if (i)
 		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
......
@@ -47,7 +47,6 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 			struct mlx5_db *db)
 {
 	struct mlx5_ib_user_db_page *page;
-	struct ib_umem_chunk *chunk;
 	int err = 0;
 
 	mutex_lock(&context->db_page_mutex);
@@ -75,8 +74,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
 	list_add(&page->list, &context->db_page_list);
 
 found:
-	chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
-	db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
 	db->u.user_page = page;
 	++page->refcnt;
......
@@ -44,16 +44,17 @@
 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 			int *ncont, int *order)
 {
-	struct ib_umem_chunk *chunk;
 	unsigned long tmp;
 	unsigned long m;
-	int i, j, k;
+	int i, k;
 	u64 base = 0;
 	int p = 0;
 	int skip;
 	int mask;
 	u64 len;
 	u64 pfn;
+	struct scatterlist *sg;
+	int entry;
 
 	addr = addr >> PAGE_SHIFT;
 	tmp = (unsigned long)addr;
@@ -61,32 +62,31 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	skip = 1 << m;
 	mask = skip - 1;
 	i = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; j++) {
-			len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
-			pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT;
-			for (k = 0; k < len; k++) {
-				if (!(i & mask)) {
-					tmp = (unsigned long)pfn;
-					m = min(m, find_first_bit(&tmp, sizeof(tmp)));
-					skip = 1 << m;
-					mask = skip - 1;
-					base = pfn;
-					p = 0;
-				} else {
-					if (base + p != pfn) {
-						tmp = (unsigned long)p;
-						m = find_first_bit(&tmp, sizeof(tmp));
-						skip = 1 << m;
-						mask = skip - 1;
-						base = pfn;
-						p = 0;
-					}
-				}
-				p++;
-				i++;
-			}
-		}
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+		for (k = 0; k < len; k++) {
+			if (!(i & mask)) {
+				tmp = (unsigned long)pfn;
+				m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+				skip = 1 << m;
+				mask = skip - 1;
+				base = pfn;
+				p = 0;
+			} else {
+				if (base + p != pfn) {
+					tmp = (unsigned long)p;
+					m = find_first_bit(&tmp, sizeof(tmp));
+					skip = 1 << m;
+					mask = skip - 1;
+					base = pfn;
+					p = 0;
+				}
+			}
+			p++;
+			i++;
+		}
+	}
 
 	if (i) {
 		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
@@ -112,32 +112,32 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 {
 	int shift = page_shift - PAGE_SHIFT;
 	int mask = (1 << shift) - 1;
-	struct ib_umem_chunk *chunk;
-	int i, j, k;
+	int i, k;
 	u64 cur = 0;
 	u64 base;
 	int len;
+	struct scatterlist *sg;
+	int entry;
 
 	i = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; j++) {
-			len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
-			base = sg_dma_address(&chunk->page_list[j]);
-			for (k = 0; k < len; k++) {
-				if (!(i & mask)) {
-					cur = base + (k << PAGE_SHIFT);
-					if (umr)
-						cur |= 3;
-
-					pas[i >> shift] = cpu_to_be64(cur);
-					mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
-						    i >> shift, be64_to_cpu(pas[i >> shift]));
-				} else
-					mlx5_ib_dbg(dev, "=====> 0x%llx\n",
-						    base + (k << PAGE_SHIFT));
-				i++;
-			}
-		}
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		base = sg_dma_address(sg);
+		for (k = 0; k < len; k++) {
+			if (!(i & mask)) {
+				cur = base + (k << PAGE_SHIFT);
+				if (umr)
+					cur |= 3;
+
+				pas[i >> shift] = cpu_to_be64(cur);
+				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
+					    i >> shift, be64_to_cpu(pas[i >> shift]));
+			} else
+				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
+					    base + (k << PAGE_SHIFT));
+			i++;
+		}
+	}
 }
 
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
......
@@ -976,12 +976,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				       u64 virt, int acc, struct ib_udata *udata)
 {
 	struct mthca_dev *dev = to_mdev(pd->device);
-	struct ib_umem_chunk *chunk;
+	struct scatterlist *sg;
 	struct mthca_mr *mr;
 	struct mthca_reg_mr ucmd;
 	u64 *pages;
 	int shift, n, len;
-	int i, j, k;
+	int i, k, entry;
 	int err = 0;
 	int write_mtt_size;
@@ -1009,10 +1009,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	shift = ffs(mr->umem->page_size) - 1;
-
-	n = 0;
-	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
-		n += chunk->nents;
+	n = mr->umem->nmap;
 
 	mr->mtt = mthca_alloc_mtt(dev, n);
 	if (IS_ERR(mr->mtt)) {
@@ -1030,25 +1027,24 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
 
-	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> shift;
-			for (k = 0; k < len; ++k) {
-				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
-					mr->umem->page_size * k;
-				/*
-				 * Be friendly to write_mtt and pass it chunks
-				 * of appropriate size.
-				 */
-				if (i == write_mtt_size) {
-					err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
-					if (err)
-						goto mtt_done;
-					n += i;
-					i = 0;
-				}
-			}
-		}
+	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] = sg_dma_address(sg) +
+				mr->umem->page_size * k;
+			/*
+			 * Be friendly to write_mtt and pass it chunks
+			 * of appropriate size.
+			 */
+			if (i == write_mtt_size) {
+				err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+				if (err)
+					goto mtt_done;
+				n += i;
+				i = 0;
+			}
+		}
+	}
 
 	if (i)
 		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
......
@@ -2307,7 +2307,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	struct ib_mr *ibmr = ERR_PTR(-EINVAL);
-	struct ib_umem_chunk *chunk;
+	struct scatterlist *sg;
 	struct nes_ucontext *nes_ucontext;
 	struct nes_pbl *nespbl;
 	struct nes_mr *nesmr;
@@ -2315,7 +2315,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct nes_mem_reg_req req;
 	struct nes_vpbl vpbl;
 	struct nes_root_vpbl root_vpbl;
-	int nmap_index, page_index;
+	int entry, page_index;
 	int page_count = 0;
 	int err, pbl_depth = 0;
 	int chunk_pages;
@@ -2330,6 +2330,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	u16 pbl_count;
 	u8 single_page = 1;
 	u8 stag_key;
+	int first_page = 1;
 
 	region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
 	if (IS_ERR(region)) {
...@@ -2380,128 +2381,125 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -2380,128 +2381,125 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
} }
nesmr->region = region; nesmr->region = region;
list_for_each_entry(chunk, &region->chunk_list, list) { for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n", if (sg_dma_address(sg) & ~PAGE_MASK) {
chunk->nents, chunk->nmap); ib_umem_release(region);
for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) { nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) { nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
ib_umem_release(region); (unsigned int) sg_dma_address(sg));
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); ibmr = ERR_PTR(-EINVAL);
nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n", kfree(nesmr);
(unsigned int) sg_dma_address(&chunk->page_list[nmap_index])); goto reg_user_mr_err;
ibmr = ERR_PTR(-EINVAL); }
kfree(nesmr);
goto reg_user_mr_err;
}
if (!sg_dma_len(&chunk->page_list[nmap_index])) { if (!sg_dma_len(sg)) {
ib_umem_release(region); ib_umem_release(region);
nes_free_resource(nesadapter, nesadapter->allocated_mrs, nes_free_resource(nesadapter, nesadapter->allocated_mrs,
stag_index); stag_index);
nes_debug(NES_DBG_MR, "Invalid Buffer Size\n"); nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
ibmr = ERR_PTR(-EINVAL); ibmr = ERR_PTR(-EINVAL);
kfree(nesmr); kfree(nesmr);
goto reg_user_mr_err; goto reg_user_mr_err;
} }
region_length += sg_dma_len(&chunk->page_list[nmap_index]); region_length += sg_dma_len(sg);
chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12; chunk_pages = sg_dma_len(sg) >> 12;
region_length -= skip_pages << 12; region_length -= skip_pages << 12;
for (page_index=skip_pages; page_index < chunk_pages; page_index++) { for (page_index = skip_pages; page_index < chunk_pages; page_index++) {
skip_pages = 0; skip_pages = 0;
if ((page_count!=0)&&(page_count<<12)-(region->offset&(4096-1))>=region->length) if ((page_count != 0) && (page_count<<12)-(region->offset&(4096-1)) >= region->length)
goto enough_pages; goto enough_pages;
if ((page_count&0x01FF) == 0) { if ((page_count&0x01FF) == 0) {
if (page_count >= 1024 * 512) { if (page_count >= 1024 * 512) {
ib_umem_release(region);
nes_free_resource(nesadapter,
nesadapter->allocated_mrs, stag_index);
kfree(nesmr);
ibmr = ERR_PTR(-E2BIG);
goto reg_user_mr_err;
}
if (root_pbl_index == 1) {
root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
8192, &root_vpbl.pbl_pbase);
nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
if (!root_vpbl.pbl_vbase) {
ib_umem_release(region); ib_umem_release(region);
nes_free_resource(nesadapter, pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
nesadapter->allocated_mrs, stag_index); vpbl.pbl_pbase);
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
stag_index);
kfree(nesmr); kfree(nesmr);
ibmr = ERR_PTR(-E2BIG); ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err; goto reg_user_mr_err;
} }
if (root_pbl_index == 1) { root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, GFP_KERNEL);
8192, &root_vpbl.pbl_pbase); if (!root_vpbl.leaf_vpbl) {
nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
if (!root_vpbl.pbl_vbase) {
ib_umem_release(region);
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
vpbl.pbl_pbase);
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
stag_index);
kfree(nesmr);
ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err;
}
root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
GFP_KERNEL);
if (!root_vpbl.leaf_vpbl) {
ib_umem_release(region);
pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
root_vpbl.pbl_pbase);
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
vpbl.pbl_pbase);
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
stag_index);
kfree(nesmr);
ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err;
}
root_vpbl.pbl_vbase[0].pa_low =
cpu_to_le32((u32)vpbl.pbl_pbase);
root_vpbl.pbl_vbase[0].pa_high =
cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
root_vpbl.leaf_vpbl[0] = vpbl;
}
vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
&vpbl.pbl_pbase);
nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
if (!vpbl.pbl_vbase) {
ib_umem_release(region); ib_umem_release(region);
nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index); pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
ibmr = ERR_PTR(-ENOMEM); root_vpbl.pbl_pbase);
pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
vpbl.pbl_pbase);
nes_free_resource(nesadapter, nesadapter->allocated_mrs,
stag_index);
kfree(nesmr); kfree(nesmr);
ibmr = ERR_PTR(-ENOMEM);
goto reg_user_mr_err; goto reg_user_mr_err;
} }
if (1 <= root_pbl_index) { root_vpbl.pbl_vbase[0].pa_low =
root_vpbl.pbl_vbase[root_pbl_index].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
cpu_to_le32((u32)vpbl.pbl_pbase); root_vpbl.pbl_vbase[0].pa_high =
root_vpbl.pbl_vbase[root_pbl_index].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32))); root_vpbl.leaf_vpbl[0] = vpbl;
root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
}
root_pbl_index++;
cur_pbl_index = 0;
} }
if (single_page) { vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
if (page_count != 0) { &vpbl.pbl_pbase);
if ((last_dma_addr+4096) != nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
(sg_dma_address(&chunk->page_list[nmap_index])+ vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
(page_index*4096))) if (!vpbl.pbl_vbase) {
single_page = 0; ib_umem_release(region);
last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
(page_index*4096); ibmr = ERR_PTR(-ENOMEM);
} else { kfree(nesmr);
first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+ goto reg_user_mr_err;
(page_index*4096); }
last_dma_addr = first_dma_addr; if (1 <= root_pbl_index) {
} root_vpbl.pbl_vbase[root_pbl_index].pa_low =
cpu_to_le32((u32)vpbl.pbl_pbase);
root_vpbl.pbl_vbase[root_pbl_index].pa_high =
cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
}
root_pbl_index++;
cur_pbl_index = 0;
}
if (single_page) {
if (page_count != 0) {
if ((last_dma_addr+4096) !=
(sg_dma_address(sg)+
(page_index*4096)))
single_page = 0;
last_dma_addr = sg_dma_address(sg)+
(page_index*4096);
} else {
first_dma_addr = sg_dma_address(sg)+
(page_index*4096);
last_dma_addr = first_dma_addr;
} }
vpbl.pbl_vbase[cur_pbl_index].pa_low =
cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
(page_index*4096)));
vpbl.pbl_vbase[cur_pbl_index].pa_high =
cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
(page_index*4096))) >> 32)));
cur_pbl_index++;
page_count++;
} }
vpbl.pbl_vbase[cur_pbl_index].pa_low =
cpu_to_le32((u32)(sg_dma_address(sg)+
(page_index*4096)));
vpbl.pbl_vbase[cur_pbl_index].pa_high =
cpu_to_le32((u32)((((u64)(sg_dma_address(sg)+
(page_index*4096))) >> 32)));
cur_pbl_index++;
page_count++;
} }
} }
enough_pages: enough_pages:
nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x," nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
" stag_key=0x%08x\n", " stag_key=0x%08x\n",
@@ -2613,25 +2611,28 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		  nespbl->pbl_size, (unsigned long) nespbl->pbl_pbase,
 		  (void *) nespbl->pbl_vbase, nespbl->user_base);
 
-	list_for_each_entry(chunk, &region->chunk_list, list) {
-		for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
-			chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
-			chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0;
-			nespbl->page = sg_page(&chunk->page_list[0]);
-			for (page_index=0; page_index<chunk_pages; page_index++) {
-				((__le32 *)pbl)[0] = cpu_to_le32((u32)
-						(sg_dma_address(&chunk->page_list[nmap_index])+
-						(page_index*4096)));
-				((__le32 *)pbl)[1] = cpu_to_le32(((u64)
-						(sg_dma_address(&chunk->page_list[nmap_index])+
-						(page_index*4096)))>>32);
-				nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
-						(unsigned long long)*pbl,
-						le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
-				pbl++;
-			}
+	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+		chunk_pages = sg_dma_len(sg) >> 12;
+		chunk_pages += (sg_dma_len(sg) & (4096-1)) ? 1 : 0;
+		if (first_page) {
+			nespbl->page = sg_page(sg);
+			first_page = 0;
+		}
+
+		for (page_index = 0; page_index < chunk_pages; page_index++) {
+			((__le32 *)pbl)[0] = cpu_to_le32((u32)
+					(sg_dma_address(sg)+
+					(page_index*4096)));
+			((__le32 *)pbl)[1] = cpu_to_le32(((u64)
+					(sg_dma_address(sg)+
+					(page_index*4096)))>>32);
+			nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+					(unsigned long long)*pbl,
+					le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
+			pbl++;
 		}
 	}
+
 	if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
 		list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
 	} else {
......
@@ -726,10 +726,10 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
 			    u32 num_pbes)
 {
 	struct ocrdma_pbe *pbe;
-	struct ib_umem_chunk *chunk;
+	struct scatterlist *sg;
 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
 	struct ib_umem *umem = mr->umem;
-	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
 
 	if (!mr->hwmr.num_pbes)
 		return;
@@ -739,39 +739,37 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
 	shift = ilog2(umem->page_size);
 
-	list_for_each_entry(chunk, &umem->chunk_list, list) {
-		/* get all the dma regions from the chunk. */
-		for (i = 0; i < chunk->nmap; i++) {
-			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
-			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-				/* store the page address in pbe */
-				pbe->pa_lo =
-				    cpu_to_le32(sg_dma_address
-						(&chunk->page_list[i]) +
-						(umem->page_size * pg_cnt));
-				pbe->pa_hi =
-				    cpu_to_le32(upper_32_bits
-						((sg_dma_address
-						  (&chunk->page_list[i]) +
-						  umem->page_size * pg_cnt)));
-				pbe_cnt += 1;
-				total_num_pbes += 1;
-				pbe++;
-
-				/* if done building pbes, issue the mbx cmd. */
-				if (total_num_pbes == num_pbes)
-					return;
-
-				/* if the given pbl is full storing the pbes,
-				 * move to next pbl.
-				 */
-				if (pbe_cnt ==
-					(mr->hwmr.pbl_size / sizeof(u64))) {
-					pbl_tbl++;
-					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
-					pbe_cnt = 0;
-				}
-			}
-		}
-	}
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		pages = sg_dma_len(sg) >> shift;
+		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+			/* store the page address in pbe */
+			pbe->pa_lo =
+			    cpu_to_le32(sg_dma_address
+					(sg) +
+					(umem->page_size * pg_cnt));
+			pbe->pa_hi =
+			    cpu_to_le32(upper_32_bits
+					((sg_dma_address
+					  (sg) +
+					  umem->page_size * pg_cnt)));
+			pbe_cnt += 1;
+			total_num_pbes += 1;
+			pbe++;
+
+			/* if done building pbes, issue the mbx cmd. */
+			if (total_num_pbes == num_pbes)
+				return;
+
+			/* if the given pbl is full storing the pbes,
+			 * move to next pbl.
+			 */
+			if (pbe_cnt ==
+				(mr->hwmr.pbl_size / sizeof(u64))) {
+				pbl_tbl++;
+				pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+				pbe_cnt = 0;
+			}
+		}
+	}
 }
......
@@ -232,8 +232,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct qib_mr *mr;
 	struct ib_umem *umem;
-	struct ib_umem_chunk *chunk;
-	int n, m, i;
+	struct scatterlist *sg;
+	int n, m, entry;
 	struct ib_mr *ret;
 
 	if (length == 0) {
@@ -246,9 +246,7 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (IS_ERR(umem))
 		return (void *) umem;
 
-	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		n += chunk->nents;
+	n = umem->nmap;
 
 	mr = alloc_mr(n, pd);
 	if (IS_ERR(mr)) {
@@ -268,11 +266,10 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.page_shift = ilog2(umem->page_size);
 	m = 0;
 	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list) {
-		for (i = 0; i < chunk->nents; i++) {
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 			void *vaddr;
 
-			vaddr = page_address(sg_page(&chunk->page_list[i]));
+			vaddr = page_address(sg_page(sg));
 			if (!vaddr) {
 				ret = ERR_PTR(-EINVAL);
 				goto bail;
@@ -284,7 +281,6 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			m++;
 			n = 0;
 		}
-	}
 	}
 	ret = &mr->ibmr;
......
@@ -46,17 +46,12 @@ struct ib_umem {
 	int                     page_size;
 	int                     writable;
 	int                     hugetlb;
-	struct list_head	chunk_list;
 	struct work_struct	work;
 	struct mm_struct       *mm;
 	unsigned long		diff;
-};
-
-struct ib_umem_chunk {
-	struct list_head	list;
-	int                     nents;
-	int                     nmap;
-	struct scatterlist      page_list[0];
+	struct sg_table sg_head;
+	int             nmap;
+	int             npages;
 };
 
 #ifdef CONFIG_INFINIBAND_USER_MEM
......