Commit df17bfd4 authored by Hoang-Nam Nguyen, committed by Roland Dreier

IB/ehca: MR/MW structure refactoring

- Rename struct ehca_mr fields to clearly distinguish between kernel
  and HW page size.
- Sort struct ehca_mr_pginfo into a common part and a union containing
  specific fields for physical, user and fast MR
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 2492398e
...@@ -204,8 +204,8 @@ struct ehca_mr { ...@@ -204,8 +204,8 @@ struct ehca_mr {
spinlock_t mrlock; spinlock_t mrlock;
enum ehca_mr_flag flags; enum ehca_mr_flag flags;
u32 num_pages; /* number of MR pages */ u32 num_kpages; /* number of kernel pages */
u32 num_4k; /* number of 4k "page" portions to form MR */ u32 num_hwpages; /* number of hw pages to form MR */
int acl; /* ACL (stored here for usage in reregister) */ int acl; /* ACL (stored here for usage in reregister) */
u64 *start; /* virtual start address (stored here for */ u64 *start; /* virtual start address (stored here for */
/* usage in reregister) */ /* usage in reregister) */
...@@ -217,9 +217,6 @@ struct ehca_mr { ...@@ -217,9 +217,6 @@ struct ehca_mr {
/* fw specific data */ /* fw specific data */
struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */ struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
struct h_galpas galpas; struct h_galpas galpas;
/* data for userspace bridge */
u32 nr_of_pages;
void *pagearray;
}; };
struct ehca_mw { struct ehca_mw {
...@@ -241,26 +238,29 @@ enum ehca_mr_pgi_type { ...@@ -241,26 +238,29 @@ enum ehca_mr_pgi_type {
struct ehca_mr_pginfo { struct ehca_mr_pginfo {
enum ehca_mr_pgi_type type; enum ehca_mr_pgi_type type;
u64 num_pages; u64 num_kpages;
u64 page_cnt; u64 kpage_cnt;
u64 num_4k; /* number of 4k "page" portions */ u64 num_hwpages; /* number of hw pages */
u64 page_4k_cnt; /* counter for 4k "page" portions */ u64 hwpage_cnt; /* counter for hw pages */
u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */ u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
/* type EHCA_MR_PGI_PHYS section */ union {
struct { /* type EHCA_MR_PGI_PHYS section */
int num_phys_buf; int num_phys_buf;
struct ib_phys_buf *phys_buf_array; struct ib_phys_buf *phys_buf_array;
u64 next_buf; u64 next_buf;
} phy;
/* type EHCA_MR_PGI_USER section */ struct { /* type EHCA_MR_PGI_USER section */
struct ib_umem *region; struct ib_umem *region;
struct ib_umem_chunk *next_chunk; struct ib_umem_chunk *next_chunk;
u64 next_nmap; u64 next_nmap;
} usr;
/* type EHCA_MR_PGI_FMR section */ struct { /* type EHCA_MR_PGI_FMR section */
u64 fmr_pgsize;
u64 *page_list; u64 *page_list;
u64 next_listelem; u64 next_listelem;
/* next_4k also used within EHCA_MR_PGI_FMR */ } fmr;
} u;
}; };
/* output parameters for MR/FMR hipz calls */ /* output parameters for MR/FMR hipz calls */
......
...@@ -150,9 +150,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, ...@@ -150,9 +150,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
u64 size; u64 size;
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
u32 num_pages_mr;
u32 num_pages_4k; /* 4k portion "pages" */
if ((num_phys_buf <= 0) || !phys_buf_array) { if ((num_phys_buf <= 0) || !phys_buf_array) {
ehca_err(pd->device, "bad input values: num_phys_buf=%x " ehca_err(pd->device, "bad input values: num_phys_buf=%x "
...@@ -196,12 +193,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, ...@@ -196,12 +193,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
goto reg_phys_mr_exit0; goto reg_phys_mr_exit0;
} }
/* determine number of MR pages */
num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
PAGE_SIZE);
num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size,
EHCA_PAGESIZE);
/* register MR on HCA */ /* register MR on HCA */
if (ehca_mr_is_maxmr(size, iova_start)) { if (ehca_mr_is_maxmr(size, iova_start)) {
e_mr->flags |= EHCA_MR_FLAG_MAXMR; e_mr->flags |= EHCA_MR_FLAG_MAXMR;
...@@ -213,12 +204,21 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, ...@@ -213,12 +204,21 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
goto reg_phys_mr_exit1; goto reg_phys_mr_exit1;
} }
} else { } else {
struct ehca_mr_pginfo pginfo;
u32 num_kpages;
u32 num_hwpages;
num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
PAGE_SIZE);
num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
size, EHCA_PAGESIZE);
memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_PHYS; pginfo.type = EHCA_MR_PGI_PHYS;
pginfo.num_pages = num_pages_mr; pginfo.num_kpages = num_kpages;
pginfo.num_4k = num_pages_4k; pginfo.num_hwpages = num_hwpages;
pginfo.num_phys_buf = num_phys_buf; pginfo.u.phy.num_phys_buf = num_phys_buf;
pginfo.phys_buf_array = phys_buf_array; pginfo.u.phy.phys_buf_array = phys_buf_array;
pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
EHCA_PAGESIZE); EHCA_PAGESIZE);
ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
...@@ -254,10 +254,10 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt ...@@ -254,10 +254,10 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
struct ehca_shca *shca = struct ehca_shca *shca =
container_of(pd->device, struct ehca_shca, ib_device); container_of(pd->device, struct ehca_shca, ib_device);
struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; struct ehca_mr_pginfo pginfo;
int ret; int ret;
u32 num_pages_mr; u32 num_kpages;
u32 num_pages_4k; /* 4k portion "pages" */ u32 num_hwpages;
if (!pd) { if (!pd) {
ehca_gen_err("bad pd=%p", pd); ehca_gen_err("bad pd=%p", pd);
...@@ -307,17 +307,18 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt ...@@ -307,17 +307,18 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
} }
/* determine number of MR pages */ /* determine number of MR pages */
num_pages_mr = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
num_pages_4k = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length, num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
EHCA_PAGESIZE); EHCA_PAGESIZE);
/* register MR on HCA */ /* register MR on HCA */
memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_USER; pginfo.type = EHCA_MR_PGI_USER;
pginfo.num_pages = num_pages_mr; pginfo.num_kpages = num_kpages;
pginfo.num_4k = num_pages_4k; pginfo.num_hwpages = num_hwpages;
pginfo.region = e_mr->umem; pginfo.u.usr.region = e_mr->umem;
pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE; pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
(&e_mr->umem->chunk_list), (&e_mr->umem->chunk_list),
list); list);
...@@ -365,9 +366,9 @@ int ehca_rereg_phys_mr(struct ib_mr *mr, ...@@ -365,9 +366,9 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
struct ehca_pd *new_pd; struct ehca_pd *new_pd;
u32 tmp_lkey, tmp_rkey; u32 tmp_lkey, tmp_rkey;
unsigned long sl_flags; unsigned long sl_flags;
u32 num_pages_mr = 0; u32 num_kpages = 0;
u32 num_pages_4k = 0; /* 4k portion "pages" */ u32 num_hwpages = 0;
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; struct ehca_mr_pginfo pginfo;
u32 cur_pid = current->tgid; u32 cur_pid = current->tgid;
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
...@@ -463,16 +464,17 @@ int ehca_rereg_phys_mr(struct ib_mr *mr, ...@@ -463,16 +464,17 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
ret = -EINVAL; ret = -EINVAL;
goto rereg_phys_mr_exit1; goto rereg_phys_mr_exit1;
} }
num_pages_mr = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
new_size, PAGE_SIZE); new_size, PAGE_SIZE);
num_pages_4k = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) + num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
new_size, EHCA_PAGESIZE); new_size, EHCA_PAGESIZE);
memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_PHYS; pginfo.type = EHCA_MR_PGI_PHYS;
pginfo.num_pages = num_pages_mr; pginfo.num_kpages = num_kpages;
pginfo.num_4k = num_pages_4k; pginfo.num_hwpages = num_hwpages;
pginfo.num_phys_buf = num_phys_buf; pginfo.u.phy.num_phys_buf = num_phys_buf;
pginfo.phys_buf_array = phys_buf_array; pginfo.u.phy.phys_buf_array = phys_buf_array;
pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
EHCA_PAGESIZE); EHCA_PAGESIZE);
} }
if (mr_rereg_mask & IB_MR_REREG_ACCESS) if (mr_rereg_mask & IB_MR_REREG_ACCESS)
...@@ -704,7 +706,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, ...@@ -704,7 +706,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
struct ehca_mr *e_fmr; struct ehca_mr *e_fmr;
int ret; int ret;
u32 tmp_lkey, tmp_rkey; u32 tmp_lkey, tmp_rkey;
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; struct ehca_mr_pginfo pginfo;
/* check other parameters */ /* check other parameters */
if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
...@@ -750,6 +752,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, ...@@ -750,6 +752,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
e_fmr->flags |= EHCA_MR_FLAG_FMR; e_fmr->flags |= EHCA_MR_FLAG_FMR;
/* register MR on HCA */ /* register MR on HCA */
memset(&pginfo, 0, sizeof(pginfo));
ret = ehca_reg_mr(shca, e_fmr, NULL, ret = ehca_reg_mr(shca, e_fmr, NULL,
fmr_attr->max_pages * (1 << fmr_attr->page_shift), fmr_attr->max_pages * (1 << fmr_attr->page_shift),
mr_access_flags, e_pd, &pginfo, mr_access_flags, e_pd, &pginfo,
...@@ -788,7 +791,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr, ...@@ -788,7 +791,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
container_of(fmr->device, struct ehca_shca, ib_device); container_of(fmr->device, struct ehca_shca, ib_device);
struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; struct ehca_mr_pginfo pginfo;
u32 tmp_lkey, tmp_rkey; u32 tmp_lkey, tmp_rkey;
if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
...@@ -814,11 +817,12 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr, ...@@ -814,11 +817,12 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
} }
memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_FMR; pginfo.type = EHCA_MR_PGI_FMR;
pginfo.num_pages = list_len; pginfo.num_kpages = list_len;
pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
pginfo.page_list = page_list; pginfo.u.fmr.page_list = page_list;
pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) / pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
EHCA_PAGESIZE); EHCA_PAGESIZE);
ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova, ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
...@@ -979,8 +983,8 @@ int ehca_reg_mr(struct ehca_shca *shca, ...@@ -979,8 +983,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
goto ehca_reg_mr_exit1; goto ehca_reg_mr_exit1;
/* successful registration */ /* successful registration */
e_mr->num_pages = pginfo->num_pages; e_mr->num_kpages = pginfo->num_kpages;
e_mr->num_4k = pginfo->num_4k; e_mr->num_hwpages = pginfo->num_hwpages;
e_mr->start = iova_start; e_mr->start = iova_start;
e_mr->size = size; e_mr->size = size;
e_mr->acl = acl; e_mr->acl = acl;
...@@ -993,10 +997,10 @@ int ehca_reg_mr(struct ehca_shca *shca, ...@@ -993,10 +997,10 @@ int ehca_reg_mr(struct ehca_shca *shca,
if (h_ret != H_SUCCESS) { if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p " ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
"iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
"pginfo=%p num_pages=%lx num_4k=%lx ret=%x", "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
h_ret, shca, e_mr, iova_start, size, acl, e_pd, h_ret, shca, e_mr, iova_start, size, acl, e_pd,
hipzout.lkey, pginfo, pginfo->num_pages, hipzout.lkey, pginfo, pginfo->num_kpages,
pginfo->num_4k, ret); pginfo->num_hwpages, ret);
ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
"not recoverable"); "not recoverable");
} }
...@@ -1004,9 +1008,9 @@ int ehca_reg_mr(struct ehca_shca *shca, ...@@ -1004,9 +1008,9 @@ int ehca_reg_mr(struct ehca_shca *shca,
if (ret) if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
"iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
"num_pages=%lx num_4k=%lx", "num_kpages=%lx num_hwpages=%lx",
ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
pginfo->num_pages, pginfo->num_4k); pginfo->num_kpages, pginfo->num_hwpages);
return ret; return ret;
} /* end ehca_reg_mr() */ } /* end ehca_reg_mr() */
...@@ -1031,10 +1035,10 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, ...@@ -1031,10 +1035,10 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
} }
/* max 512 pages per shot */ /* max 512 pages per shot */
for (i = 0; i < NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES); i++) { for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) { if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
rnum = pginfo->num_4k % MAX_RPAGES; /* last shot */ rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
if (rnum == 0) if (rnum == 0)
rnum = MAX_RPAGES; /* last shot is full */ rnum = MAX_RPAGES; /* last shot is full */
} else } else
...@@ -1070,7 +1074,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, ...@@ -1070,7 +1074,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
0, /* pagesize 4k */ 0, /* pagesize 4k */
0, rpage, rnum); 0, rpage, rnum);
if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) { if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
/* /*
* check for 'registration complete'==H_SUCCESS * check for 'registration complete'==H_SUCCESS
* and for 'page registered'==H_PAGE_REGISTERED * and for 'page registered'==H_PAGE_REGISTERED
...@@ -1106,8 +1110,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca, ...@@ -1106,8 +1110,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
ehca_reg_mr_rpages_exit0: ehca_reg_mr_rpages_exit0:
if (ret) if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
"num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo, "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
pginfo->num_pages, pginfo->num_4k); pginfo, pginfo->num_kpages, pginfo->num_hwpages);
return ret; return ret;
} /* end ehca_reg_mr_rpages() */ } /* end ehca_reg_mr_rpages() */
...@@ -1142,12 +1146,12 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, ...@@ -1142,12 +1146,12 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
} }
pginfo_save = *pginfo; pginfo_save = *pginfo;
ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage); ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_hwpages, kpage);
if (ret) { if (ret) {
ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
"pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p", "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
e_mr, pginfo, pginfo->type, pginfo->num_pages, "kpage=%p", e_mr, pginfo, pginfo->type,
pginfo->num_4k,kpage); pginfo->num_kpages, pginfo->num_hwpages, kpage);
goto ehca_rereg_mr_rereg1_exit1; goto ehca_rereg_mr_rereg1_exit1;
} }
rpage = virt_to_abs(kpage); rpage = virt_to_abs(kpage);
...@@ -1181,8 +1185,8 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, ...@@ -1181,8 +1185,8 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
* successful reregistration * successful reregistration
* note: start and start_out are identical for eServer HCAs * note: start and start_out are identical for eServer HCAs
*/ */
e_mr->num_pages = pginfo->num_pages; e_mr->num_kpages = pginfo->num_kpages;
e_mr->num_4k = pginfo->num_4k; e_mr->num_hwpages = pginfo->num_hwpages;
e_mr->start = iova_start; e_mr->start = iova_start;
e_mr->size = size; e_mr->size = size;
e_mr->acl = acl; e_mr->acl = acl;
...@@ -1195,9 +1199,9 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, ...@@ -1195,9 +1199,9 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
ehca_rereg_mr_rereg1_exit0: ehca_rereg_mr_rereg1_exit0:
if ( ret && (ret != -EAGAIN) ) if ( ret && (ret != -EAGAIN) )
ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
"pginfo=%p num_pages=%lx num_4k=%lx", "pginfo=%p num_kpages=%lx num_hwpages=%lx",
ret, *lkey, *rkey, pginfo, pginfo->num_pages, ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
pginfo->num_4k); pginfo->num_hwpages);
return ret; return ret;
} /* end ehca_rereg_mr_rereg1() */ } /* end ehca_rereg_mr_rereg1() */
...@@ -1219,10 +1223,12 @@ int ehca_rereg_mr(struct ehca_shca *shca, ...@@ -1219,10 +1223,12 @@ int ehca_rereg_mr(struct ehca_shca *shca,
int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
/* first determine reregistration hCall(s) */ /* first determine reregistration hCall(s) */
if ((pginfo->num_4k > MAX_RPAGES) || (e_mr->num_4k > MAX_RPAGES) || if ((pginfo->num_hwpages > MAX_RPAGES) ||
(pginfo->num_4k > e_mr->num_4k)) { (e_mr->num_hwpages > MAX_RPAGES) ||
ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx " (pginfo->num_hwpages > e_mr->num_hwpages)) {
"e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k); ehca_dbg(&shca->ib_device, "Rereg3 case, "
"pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
pginfo->num_hwpages, e_mr->num_hwpages);
rereg_1_hcall = 0; rereg_1_hcall = 0;
rereg_3_hcall = 1; rereg_3_hcall = 1;
} }
...@@ -1286,9 +1292,9 @@ int ehca_rereg_mr(struct ehca_shca *shca, ...@@ -1286,9 +1292,9 @@ int ehca_rereg_mr(struct ehca_shca *shca,
if (ret) if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
"iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
"num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
"rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey, acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
rereg_1_hcall, rereg_3_hcall); rereg_1_hcall, rereg_3_hcall);
return ret; return ret;
} /* end ehca_rereg_mr() */ } /* end ehca_rereg_mr() */
...@@ -1306,7 +1312,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca, ...@@ -1306,7 +1312,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
struct ehca_mr save_fmr; struct ehca_mr save_fmr;
u32 tmp_lkey, tmp_rkey; u32 tmp_lkey, tmp_rkey;
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; struct ehca_mr_pginfo pginfo;
struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
/* first check if reregistration hCall can be used for unmap */ /* first check if reregistration hCall can be used for unmap */
...@@ -1370,9 +1376,10 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca, ...@@ -1370,9 +1376,10 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
e_fmr->acl = save_fmr.acl; e_fmr->acl = save_fmr.acl;
memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_FMR; pginfo.type = EHCA_MR_PGI_FMR;
pginfo.num_pages = 0; pginfo.num_kpages = 0;
pginfo.num_4k = 0; pginfo.num_hwpages = 0;
ret = ehca_reg_mr(shca, e_fmr, NULL, ret = ehca_reg_mr(shca, e_fmr, NULL,
(e_fmr->fmr_max_pages * e_fmr->fmr_page_size), (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
e_fmr->acl, e_pd, &pginfo, &tmp_lkey, e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
...@@ -1428,8 +1435,8 @@ int ehca_reg_smr(struct ehca_shca *shca, ...@@ -1428,8 +1435,8 @@ int ehca_reg_smr(struct ehca_shca *shca,
goto ehca_reg_smr_exit0; goto ehca_reg_smr_exit0;
} }
/* successful registration */ /* successful registration */
e_newmr->num_pages = e_origmr->num_pages; e_newmr->num_kpages = e_origmr->num_kpages;
e_newmr->num_4k = e_origmr->num_4k; e_newmr->num_hwpages = e_origmr->num_hwpages;
e_newmr->start = iova_start; e_newmr->start = iova_start;
e_newmr->size = e_origmr->size; e_newmr->size = e_origmr->size;
e_newmr->acl = acl; e_newmr->acl = acl;
...@@ -1458,10 +1465,10 @@ int ehca_reg_internal_maxmr( ...@@ -1458,10 +1465,10 @@ int ehca_reg_internal_maxmr(
struct ehca_mr *e_mr; struct ehca_mr *e_mr;
u64 *iova_start; u64 *iova_start;
u64 size_maxmr; u64 size_maxmr;
struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; struct ehca_mr_pginfo pginfo;
struct ib_phys_buf ib_pbuf; struct ib_phys_buf ib_pbuf;
u32 num_pages_mr; u32 num_kpages;
u32 num_pages_4k; /* 4k portion "pages" */ u32 num_hwpages;
e_mr = ehca_mr_new(); e_mr = ehca_mr_new();
if (!e_mr) { if (!e_mr) {
...@@ -1476,25 +1483,26 @@ int ehca_reg_internal_maxmr( ...@@ -1476,25 +1483,26 @@ int ehca_reg_internal_maxmr(
iova_start = (u64*)KERNELBASE; iova_start = (u64*)KERNELBASE;
ib_pbuf.addr = 0; ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr; ib_pbuf.size = size_maxmr;
num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
PAGE_SIZE); PAGE_SIZE);
num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
+ size_maxmr, EHCA_PAGESIZE); EHCA_PAGESIZE);
memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_PHYS; pginfo.type = EHCA_MR_PGI_PHYS;
pginfo.num_pages = num_pages_mr; pginfo.num_kpages = num_kpages;
pginfo.num_4k = num_pages_4k; pginfo.num_hwpages = num_hwpages;
pginfo.num_phys_buf = 1; pginfo.u.phy.num_phys_buf = 1;
pginfo.phys_buf_array = &ib_pbuf; pginfo.u.phy.phys_buf_array = &ib_pbuf;
ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
&pginfo, &e_mr->ib.ib_mr.lkey, &pginfo, &e_mr->ib.ib_mr.lkey,
&e_mr->ib.ib_mr.rkey); &e_mr->ib.ib_mr.rkey);
if (ret) { if (ret) {
ehca_err(&shca->ib_device, "reg of internal max MR failed, " ehca_err(&shca->ib_device, "reg of internal max MR failed, "
"e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x " "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
"num_pages_4k=%x", e_mr, iova_start, size_maxmr, "num_hwpages=%x", e_mr, iova_start, size_maxmr,
num_pages_mr, num_pages_4k); num_kpages, num_hwpages);
goto ehca_reg_internal_maxmr_exit1; goto ehca_reg_internal_maxmr_exit1;
} }
...@@ -1546,8 +1554,8 @@ int ehca_reg_maxmr(struct ehca_shca *shca, ...@@ -1546,8 +1554,8 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
return ehca2ib_return_code(h_ret); return ehca2ib_return_code(h_ret);
} }
/* successful registration */ /* successful registration */
e_newmr->num_pages = e_origmr->num_pages; e_newmr->num_kpages = e_origmr->num_kpages;
e_newmr->num_4k = e_origmr->num_4k; e_newmr->num_hwpages = e_origmr->num_hwpages;
e_newmr->start = iova_start; e_newmr->start = iova_start;
e_newmr->size = e_origmr->size; e_newmr->size = e_origmr->size;
e_newmr->acl = acl; e_newmr->acl = acl;
...@@ -1693,138 +1701,139 @@ int ehca_set_pagebuf(struct ehca_mr *e_mr, ...@@ -1693,138 +1701,139 @@ int ehca_set_pagebuf(struct ehca_mr *e_mr,
struct ib_umem_chunk *chunk; struct ib_umem_chunk *chunk;
struct ib_phys_buf *pbuf; struct ib_phys_buf *pbuf;
u64 *fmrlist; u64 *fmrlist;
u64 num4k, pgaddr, offs4k; u64 num_hw, pgaddr, offs_hw;
u32 i = 0; u32 i = 0;
u32 j = 0; u32 j = 0;
if (pginfo->type == EHCA_MR_PGI_PHYS) { if (pginfo->type == EHCA_MR_PGI_PHYS) {
/* loop over desired phys_buf_array entries */ /* loop over desired phys_buf_array entries */
while (i < number) { while (i < number) {
pbuf = pginfo->phys_buf_array + pginfo->next_buf; pbuf = pginfo->u.phy.phys_buf_array
num4k = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) + pginfo->u.phy.next_buf;
+ pbuf->size, EHCA_PAGESIZE); num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; pbuf->size, EHCA_PAGESIZE);
while (pginfo->next_4k < offs4k + num4k) { offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
while (pginfo->next_hwpage < offs_hw + num_hw) {
/* sanity check */ /* sanity check */
if ((pginfo->page_cnt >= pginfo->num_pages) || if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
(pginfo->page_4k_cnt >= pginfo->num_4k)) { (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
ehca_gen_err("page_cnt >= num_pages, " ehca_gen_err("kpage_cnt >= num_kpages, "
"page_cnt=%lx " "kpage_cnt=%lx "
"num_pages=%lx " "num_kpages=%lx "
"page_4k_cnt=%lx " "hwpage_cnt=%lx "
"num_4k=%lx i=%x", "num_hwpages=%lx i=%x",
pginfo->page_cnt, pginfo->kpage_cnt,
pginfo->num_pages, pginfo->num_kpages,
pginfo->page_4k_cnt, pginfo->hwpage_cnt,
pginfo->num_4k, i); pginfo->num_hwpages, i);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_exit0; goto ehca_set_pagebuf_exit0;
} }
*kpage = phys_to_abs( *kpage = phys_to_abs(
(pbuf->addr & EHCA_PAGEMASK) (pbuf->addr & EHCA_PAGEMASK)
+ (pginfo->next_4k * EHCA_PAGESIZE)); + (pginfo->next_hwpage * EHCA_PAGESIZE));
if ( !(*kpage) && pbuf->addr ) { if ( !(*kpage) && pbuf->addr ) {
ehca_gen_err("pbuf->addr=%lx " ehca_gen_err("pbuf->addr=%lx "
"pbuf->size=%lx " "pbuf->size=%lx "
"next_4k=%lx", pbuf->addr, "next_hwpage=%lx", pbuf->addr,
pbuf->size, pbuf->size,
pginfo->next_4k); pginfo->next_hwpage);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_exit0; goto ehca_set_pagebuf_exit0;
} }
(pginfo->page_4k_cnt)++; (pginfo->hwpage_cnt)++;
(pginfo->next_4k)++; (pginfo->next_hwpage)++;
if (pginfo->next_4k % if (pginfo->next_hwpage %
(PAGE_SIZE / EHCA_PAGESIZE) == 0) (PAGE_SIZE / EHCA_PAGESIZE) == 0)
(pginfo->page_cnt)++; (pginfo->kpage_cnt)++;
kpage++; kpage++;
i++; i++;
if (i >= number) break; if (i >= number) break;
} }
if (pginfo->next_4k >= offs4k + num4k) { if (pginfo->next_hwpage >= offs_hw + num_hw) {
(pginfo->next_buf)++; (pginfo->u.phy.next_buf)++;
pginfo->next_4k = 0; pginfo->next_hwpage = 0;
} }
} }
} else if (pginfo->type == EHCA_MR_PGI_USER) { } else if (pginfo->type == EHCA_MR_PGI_USER) {
/* loop over desired chunk entries */ /* loop over desired chunk entries */
chunk = pginfo->next_chunk; chunk = pginfo->u.usr.next_chunk;
prev_chunk = pginfo->next_chunk; prev_chunk = pginfo->u.usr.next_chunk;
list_for_each_entry_continue(chunk, list_for_each_entry_continue(chunk,
(&(pginfo->region->chunk_list)), (&(pginfo->u.usr.region->chunk_list)),
list) { list) {
for (i = pginfo->next_nmap; i < chunk->nmap; ) { for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
pgaddr = ( page_to_pfn(chunk->page_list[i].page) pgaddr = ( page_to_pfn(chunk->page_list[i].page)
<< PAGE_SHIFT ); << PAGE_SHIFT );
*kpage = phys_to_abs(pgaddr + *kpage = phys_to_abs(pgaddr +
(pginfo->next_4k * (pginfo->next_hwpage *
EHCA_PAGESIZE)); EHCA_PAGESIZE));
if ( !(*kpage) ) { if ( !(*kpage) ) {
ehca_gen_err("pgaddr=%lx " ehca_gen_err("pgaddr=%lx "
"chunk->page_list[i]=%lx " "chunk->page_list[i]=%lx "
"i=%x next_4k=%lx mr=%p", "i=%x next_hwpage=%lx mr=%p",
pgaddr, pgaddr,
(u64)sg_dma_address( (u64)sg_dma_address(
&chunk-> &chunk->
page_list[i]), page_list[i]),
i, pginfo->next_4k, e_mr); i, pginfo->next_hwpage, e_mr);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_exit0; goto ehca_set_pagebuf_exit0;
} }
(pginfo->page_4k_cnt)++; (pginfo->hwpage_cnt)++;
(pginfo->next_4k)++; (pginfo->next_hwpage)++;
kpage++; kpage++;
if (pginfo->next_4k % if (pginfo->next_hwpage %
(PAGE_SIZE / EHCA_PAGESIZE) == 0) { (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
(pginfo->page_cnt)++; (pginfo->kpage_cnt)++;
(pginfo->next_nmap)++; (pginfo->u.usr.next_nmap)++;
pginfo->next_4k = 0; pginfo->next_hwpage = 0;
i++; i++;
} }
j++; j++;
if (j >= number) break; if (j >= number) break;
} }
if ((pginfo->next_nmap >= chunk->nmap) && if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
(j >= number)) { (j >= number)) {
pginfo->next_nmap = 0; pginfo->u.usr.next_nmap = 0;
prev_chunk = chunk; prev_chunk = chunk;
break; break;
} else if (pginfo->next_nmap >= chunk->nmap) { } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
pginfo->next_nmap = 0; pginfo->u.usr.next_nmap = 0;
prev_chunk = chunk; prev_chunk = chunk;
} else if (j >= number) } else if (j >= number)
break; break;
else else
prev_chunk = chunk; prev_chunk = chunk;
} }
pginfo->next_chunk = pginfo->u.usr.next_chunk =
list_prepare_entry(prev_chunk, list_prepare_entry(prev_chunk,
(&(pginfo->region->chunk_list)), (&(pginfo->u.usr.region->chunk_list)),
list); list);
} else if (pginfo->type == EHCA_MR_PGI_FMR) { } else if (pginfo->type == EHCA_MR_PGI_FMR) {
/* loop over desired page_list entries */ /* loop over desired page_list entries */
fmrlist = pginfo->page_list + pginfo->next_listelem; fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
for (i = 0; i < number; i++) { for (i = 0; i < number; i++) {
*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
pginfo->next_4k * EHCA_PAGESIZE); pginfo->next_hwpage * EHCA_PAGESIZE);
if ( !(*kpage) ) { if ( !(*kpage) ) {
ehca_gen_err("*fmrlist=%lx fmrlist=%p " ehca_gen_err("*fmrlist=%lx fmrlist=%p "
"next_listelem=%lx next_4k=%lx", "next_listelem=%lx next_hwpage=%lx",
*fmrlist, fmrlist, *fmrlist, fmrlist,
pginfo->next_listelem, pginfo->u.fmr.next_listelem,
pginfo->next_4k); pginfo->next_hwpage);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_exit0; goto ehca_set_pagebuf_exit0;
} }
(pginfo->page_4k_cnt)++; (pginfo->hwpage_cnt)++;
(pginfo->next_4k)++; (pginfo->next_hwpage)++;
kpage++; kpage++;
if (pginfo->next_4k % if (pginfo->next_hwpage %
(e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
(pginfo->page_cnt)++; (pginfo->kpage_cnt)++;
(pginfo->next_listelem)++; (pginfo->u.fmr.next_listelem)++;
fmrlist++; fmrlist++;
pginfo->next_4k = 0; pginfo->next_hwpage = 0;
} }
} }
} else { } else {
...@@ -1835,16 +1844,16 @@ int ehca_set_pagebuf(struct ehca_mr *e_mr, ...@@ -1835,16 +1844,16 @@ int ehca_set_pagebuf(struct ehca_mr *e_mr,
ehca_set_pagebuf_exit0: ehca_set_pagebuf_exit0:
if (ret) if (ret)
ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx "
"num_4k=%lx next_buf=%lx next_4k=%lx number=%x " "num_hwpages=%lx next_buf=%lx next_hwpage=%lx number=%x "
"kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x " "kpage=%p kpage_cnt=%lx hwpage_cnt=%lx i=%x "
"next_listelem=%lx region=%p next_chunk=%p " "next_listelem=%lx region=%p next_chunk=%p "
"next_nmap=%lx", ret, e_mr, pginfo, pginfo->type, "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
pginfo->num_pages, pginfo->num_4k, pginfo->num_kpages, pginfo->num_hwpages,
pginfo->next_buf, pginfo->next_4k, number, kpage, pginfo->u.phy.next_buf, pginfo->next_hwpage, number, kpage,
pginfo->page_cnt, pginfo->page_4k_cnt, i, pginfo->kpage_cnt, pginfo->hwpage_cnt, i,
pginfo->next_listelem, pginfo->region, pginfo->u.fmr.next_listelem, pginfo->u.usr.region,
pginfo->next_chunk, pginfo->next_nmap); pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap);
return ret; return ret;
} /* end ehca_set_pagebuf() */ } /* end ehca_set_pagebuf() */
...@@ -1860,101 +1869,101 @@ int ehca_set_pagebuf_1(struct ehca_mr *e_mr, ...@@ -1860,101 +1869,101 @@ int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
u64 *fmrlist; u64 *fmrlist;
struct ib_umem_chunk *chunk; struct ib_umem_chunk *chunk;
struct ib_umem_chunk *prev_chunk; struct ib_umem_chunk *prev_chunk;
u64 pgaddr, num4k, offs4k; u64 pgaddr, num_hw, offs_hw;
if (pginfo->type == EHCA_MR_PGI_PHYS) { if (pginfo->type == EHCA_MR_PGI_PHYS) {
/* sanity check */ /* sanity check */
if ((pginfo->page_cnt >= pginfo->num_pages) || if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
(pginfo->page_4k_cnt >= pginfo->num_4k)) { (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx " ehca_gen_err("kpage_cnt >= num_hwpages, kpage_cnt=%lx "
"num_pages=%lx page_4k_cnt=%lx num_4k=%lx", "num_hwpages=%lx hwpage_cnt=%lx num_hwpages=%lx",
pginfo->page_cnt, pginfo->num_pages, pginfo->kpage_cnt, pginfo->num_kpages,
pginfo->page_4k_cnt, pginfo->num_4k); pginfo->hwpage_cnt, pginfo->num_hwpages);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0; goto ehca_set_pagebuf_1_exit0;
} }
tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf; tmp_pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
num4k = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) + num_hw = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) +
tmp_pbuf->size, EHCA_PAGESIZE); tmp_pbuf->size, EHCA_PAGESIZE);
offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; offs_hw = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) + *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
(pginfo->next_4k * EHCA_PAGESIZE)); (pginfo->next_hwpage * EHCA_PAGESIZE));
if ( !(*rpage) && tmp_pbuf->addr ) { if ( !(*rpage) && tmp_pbuf->addr ) {
ehca_gen_err("tmp_pbuf->addr=%lx" ehca_gen_err("tmp_pbuf->addr=%lx"
" tmp_pbuf->size=%lx next_4k=%lx", " tmp_pbuf->size=%lx next_hwpage=%lx",
tmp_pbuf->addr, tmp_pbuf->size, tmp_pbuf->addr, tmp_pbuf->size,
pginfo->next_4k); pginfo->next_hwpage);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0; goto ehca_set_pagebuf_1_exit0;
} }
(pginfo->page_4k_cnt)++; (pginfo->hwpage_cnt)++;
(pginfo->next_4k)++; (pginfo->next_hwpage)++;
if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0) if (pginfo->next_hwpage % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
(pginfo->page_cnt)++; (pginfo->kpage_cnt)++;
if (pginfo->next_4k >= offs4k + num4k) { if (pginfo->next_hwpage >= offs_hw + num_hw) {
(pginfo->next_buf)++; (pginfo->u.phy.next_buf)++;
pginfo->next_4k = 0; pginfo->next_hwpage = 0;
} }
} else if (pginfo->type == EHCA_MR_PGI_USER) { } else if (pginfo->type == EHCA_MR_PGI_USER) {
chunk = pginfo->next_chunk; chunk = pginfo->u.usr.next_chunk;
prev_chunk = pginfo->next_chunk; prev_chunk = pginfo->u.usr.next_chunk;
list_for_each_entry_continue(chunk, list_for_each_entry_continue(chunk,
(&(pginfo->region->chunk_list)), (&(pginfo->u.usr.region->chunk_list)),
list) { list) {
pgaddr = ( page_to_pfn(chunk->page_list[ pgaddr = ( page_to_pfn(chunk->page_list[
pginfo->next_nmap].page) pginfo->u.usr.next_nmap].page)
<< PAGE_SHIFT); << PAGE_SHIFT);
*rpage = phys_to_abs(pgaddr + *rpage = phys_to_abs(pgaddr +
(pginfo->next_4k * EHCA_PAGESIZE)); (pginfo->next_hwpage * EHCA_PAGESIZE));
if ( !(*rpage) ) { if ( !(*rpage) ) {
ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx" ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
" next_nmap=%lx next_4k=%lx mr=%p", " next_nmap=%lx next_hwpage=%lx mr=%p",
pgaddr, (u64)sg_dma_address( pgaddr, (u64)sg_dma_address(
&chunk->page_list[ &chunk->page_list[
pginfo-> pginfo->u.usr.
next_nmap]), next_nmap]),
pginfo->next_nmap, pginfo->next_4k, pginfo->u.usr.next_nmap, pginfo->next_hwpage,
e_mr); e_mr);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0; goto ehca_set_pagebuf_1_exit0;
} }
(pginfo->page_4k_cnt)++; (pginfo->hwpage_cnt)++;
(pginfo->next_4k)++; (pginfo->next_hwpage)++;
if (pginfo->next_4k % if (pginfo->next_hwpage %
(PAGE_SIZE / EHCA_PAGESIZE) == 0) { (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
(pginfo->page_cnt)++; (pginfo->kpage_cnt)++;
(pginfo->next_nmap)++; (pginfo->u.usr.next_nmap)++;
pginfo->next_4k = 0; pginfo->next_hwpage = 0;
} }
if (pginfo->next_nmap >= chunk->nmap) { if (pginfo->u.usr.next_nmap >= chunk->nmap) {
pginfo->next_nmap = 0; pginfo->u.usr.next_nmap = 0;
prev_chunk = chunk; prev_chunk = chunk;
} }
break; break;
} }
pginfo->next_chunk = pginfo->u.usr.next_chunk =
list_prepare_entry(prev_chunk, list_prepare_entry(prev_chunk,
(&(pginfo->region->chunk_list)), (&(pginfo->u.usr.region->chunk_list)),
list); list);
} else if (pginfo->type == EHCA_MR_PGI_FMR) { } else if (pginfo->type == EHCA_MR_PGI_FMR) {
fmrlist = pginfo->page_list + pginfo->next_listelem; fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
pginfo->next_4k * EHCA_PAGESIZE); pginfo->next_hwpage * EHCA_PAGESIZE);
if ( !(*rpage) ) { if ( !(*rpage) ) {
ehca_gen_err("*fmrlist=%lx fmrlist=%p " ehca_gen_err("*fmrlist=%lx fmrlist=%p "
"next_listelem=%lx next_4k=%lx", "next_listelem=%lx next_hwpage=%lx",
*fmrlist, fmrlist, pginfo->next_listelem, *fmrlist, fmrlist, pginfo->u.fmr.next_listelem,
pginfo->next_4k); pginfo->next_hwpage);
ret = -EFAULT; ret = -EFAULT;
goto ehca_set_pagebuf_1_exit0; goto ehca_set_pagebuf_1_exit0;
} }
(pginfo->page_4k_cnt)++; (pginfo->hwpage_cnt)++;
(pginfo->next_4k)++; (pginfo->next_hwpage)++;
if (pginfo->next_4k % if (pginfo->next_hwpage %
(e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
(pginfo->page_cnt)++; (pginfo->kpage_cnt)++;
(pginfo->next_listelem)++; (pginfo->u.fmr.next_listelem)++;
pginfo->next_4k = 0; pginfo->next_hwpage = 0;
} }
} else { } else {
ehca_gen_err("bad pginfo->type=%x", pginfo->type); ehca_gen_err("bad pginfo->type=%x", pginfo->type);
...@@ -1964,15 +1973,15 @@ int ehca_set_pagebuf_1(struct ehca_mr *e_mr, ...@@ -1964,15 +1973,15 @@ int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
ehca_set_pagebuf_1_exit0: ehca_set_pagebuf_1_exit0:
if (ret) if (ret)
ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx "
"num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p " "num_hwpages=%lx next_buf=%lx next_hwpage=%lx rpage=%p "
"page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx " "kpage_cnt=%lx hwpage_cnt=%lx next_listelem=%lx "
"region=%p next_chunk=%p next_nmap=%lx", ret, e_mr, "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
pginfo, pginfo->type, pginfo->num_pages, pginfo, pginfo->type, pginfo->num_kpages,
pginfo->num_4k, pginfo->next_buf, pginfo->next_4k, pginfo->num_hwpages, pginfo->u.phy.next_buf, pginfo->next_hwpage,
rpage, pginfo->page_cnt, pginfo->page_4k_cnt, rpage, pginfo->kpage_cnt, pginfo->hwpage_cnt,
pginfo->next_listelem, pginfo->region, pginfo->u.fmr.next_listelem, pginfo->u.usr.region,
pginfo->next_chunk, pginfo->next_nmap); pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap);
return ret; return ret;
} /* end ehca_set_pagebuf_1() */ } /* end ehca_set_pagebuf_1() */
...@@ -2054,8 +2063,8 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, ...@@ -2054,8 +2063,8 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
void ehca_mr_deletenew(struct ehca_mr *mr) void ehca_mr_deletenew(struct ehca_mr *mr)
{ {
mr->flags = 0; mr->flags = 0;
mr->num_pages = 0; mr->num_kpages = 0;
mr->num_4k = 0; mr->num_hwpages = 0;
mr->acl = 0; mr->acl = 0;
mr->start = NULL; mr->start = NULL;
mr->fmr_page_size = 0; mr->fmr_page_size = 0;
...@@ -2064,8 +2073,6 @@ void ehca_mr_deletenew(struct ehca_mr *mr) ...@@ -2064,8 +2073,6 @@ void ehca_mr_deletenew(struct ehca_mr *mr)
mr->fmr_map_cnt = 0; mr->fmr_map_cnt = 0;
memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
memset(&mr->galpas, 0, sizeof(mr->galpas)); memset(&mr->galpas, 0, sizeof(mr->galpas));
mr->nr_of_pages = 0;
mr->pagearray = NULL;
} /* end ehca_mr_deletenew() */ } /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void) int ehca_init_mrmw_cache(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment