Commit 5995a68a authored by Julien Grall, committed by David Vrabel

xen/privcmd: Add support for Linux 64KB page granularity

The hypercall interface (as well as the toolstack) always uses 4KB page
granularity. When the toolstack asks to map a series of guest PFNs in a
batch, it expects the pages to be mapped contiguously in its virtual
memory.

When Linux uses 64KB page granularity, the privcmd driver has to map
multiple Xen PFNs into a single Linux page.

Note that this solution works for any Linux page granularity that is a
multiple of 4KB.
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent d0089e8a
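Before the diff, a rough standalone sketch of the granularity arithmetic the change relies on. This is illustrative userspace C, not kernel code: the constants mirror the kernel's XEN_PAGE_SHIFT / XEN_PFN_PER_PAGE / DIV_ROUND_UP but are redefined locally, and the 64KB Linux page size is only an assumed example.

    #include <stdio.h>

    #define XEN_PAGE_SHIFT     12UL                       /* Xen always uses 4KB frames */
    #define XEN_PAGE_SIZE      (1UL << XEN_PAGE_SHIFT)
    #define PAGE_SHIFT         16UL                       /* assumed 64KB Linux page */
    #define PAGE_SIZE          (1UL << PAGE_SHIFT)
    #define XEN_PFN_PER_PAGE   (PAGE_SIZE / XEN_PAGE_SIZE)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long nr_gfn = 100;  /* e.g. a batch of 100 guest frames */

            /* One Linux page backs XEN_PFN_PER_PAGE (here 16) Xen frames,
             * so the backing allocation is rounded up to whole Linux pages. */
            unsigned long nr_pages = DIV_ROUND_UP(nr_gfn, XEN_PFN_PER_PAGE);

            printf("%lu Xen PFNs per Linux page\n", XEN_PFN_PER_PAGE);
            printf("%lu guest frames need %lu Linux page(s)\n", nr_gfn, nr_pages);
            return 0;
    }

This is the same rounding privcmd_ioctl_mmap_batch applies below when nr_pages becomes DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE): the batch is still expressed in 4KB guest frames, but the backing allocation is counted in Linux pages.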
drivers/xen/privcmd.c
@@ -446,7 +446,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 			return -EINVAL;
 	}
 
-	nr_pages = m.num;
+	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
 	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
 		return -EINVAL;
@@ -494,7 +494,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 			goto out_unlock;
 		}
 		if (xen_feature(XENFEAT_auto_translated_physmap)) {
-			ret = alloc_empty_pages(vma, m.num);
+			ret = alloc_empty_pages(vma, nr_pages);
 			if (ret < 0)
 				goto out_unlock;
 		} else
@@ -518,6 +518,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 	state.global_error = 0;
 	state.version = version;
 
+	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
 	/* mmap_batch_fn guarantees ret == 0 */
 	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
 				    &pagelist, mmap_batch_fn, &state));
@@ -582,12 +583,13 @@ static void privcmd_close(struct vm_area_struct *vma)
 {
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
 	int rc;
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
drivers/xen/xlate_mmu.c
@@ -38,31 +38,28 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 
-/* map fgfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
-			    unsigned int domid)
-{
-	int rc;
-	struct xen_add_to_physmap_range xatp = {
-		.domid = DOMID_SELF,
-		.foreign_domid = domid,
-		.size = 1,
-		.space = XENMAPSPACE_gmfn_foreign,
-	};
-	xen_ulong_t idx = fgfn;
-	xen_pfn_t gpfn = lpfn;
-	int err = 0;
-
-	set_xen_guest_handle(xatp.idxs, &idx);
-	set_xen_guest_handle(xatp.gpfns, &gpfn);
-	set_xen_guest_handle(xatp.errs, &err);
-
-	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-	return rc < 0 ? rc : err;
-}
+typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
+
+/* Break down the pages in 4KB chunk and call fn for each gfn */
+static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
+			     xen_gfn_fn_t fn, void *data)
+{
+	unsigned long xen_pfn = 0;
+	struct page *page;
+	int i;
+
+	for (i = 0; i < nr_gfn; i++) {
+		if ((i % XEN_PFN_PER_PAGE) == 0) {
+			page = pages[i / XEN_PFN_PER_PAGE];
+			xen_pfn = page_to_xen_pfn(page);
+		}
+		fn(pfn_to_gfn(xen_pfn++), data);
+	}
+}
 
 struct remap_data {
 	xen_pfn_t *fgfn; /* foreign domain's gfn */
+	int nr_fgfn; /* Number of foreign gfn left to map */
 	pgprot_t prot;
 	domid_t domid;
 	struct vm_area_struct *vma;
@@ -71,24 +68,71 @@ struct remap_data {
 	struct xen_remap_gfn_info *info;
 	int *err_ptr;
 	int mapped;
+
+	/* Hypercall parameters */
+	int h_errs[XEN_PFN_PER_PAGE];
+	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
+	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];
+
+	int h_iter;	/* Iterator */
 };
 
+static void setup_hparams(unsigned long gfn, void *data)
+{
+	struct remap_data *info = data;
+
+	info->h_idxs[info->h_iter] = *info->fgfn;
+	info->h_gpfns[info->h_iter] = gfn;
+	info->h_errs[info->h_iter] = 0;
+
+	info->h_iter++;
+	info->fgfn++;
+}
+
 static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 			void *data)
 {
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
-	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
-	int rc;
+	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
+	int rc, nr_gfn;
+	uint32_t i;
+	struct xen_add_to_physmap_range xatp = {
+		.domid = DOMID_SELF,
+		.foreign_domid = info->domid,
+		.space = XENMAPSPACE_gmfn_foreign,
+	};
 
-	rc = map_foreign_page(pfn, *info->fgfn, info->domid);
-	*info->err_ptr++ = rc;
-	if (!rc) {
-		set_pte_at(info->vma->vm_mm, addr, ptep, pte);
-		info->mapped++;
+	nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
+	info->nr_fgfn -= nr_gfn;
+
+	info->h_iter = 0;
+	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
+	BUG_ON(info->h_iter != nr_gfn);
+
+	set_xen_guest_handle(xatp.idxs, info->h_idxs);
+	set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
+	set_xen_guest_handle(xatp.errs, info->h_errs);
+	xatp.size = nr_gfn;
+
+	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+
+	/* info->err_ptr expect to have one error status per Xen PFN */
+	for (i = 0; i < nr_gfn; i++) {
+		int err = (rc < 0) ? rc : info->h_errs[i];
+
+		*(info->err_ptr++) = err;
+		if (!err)
+			info->mapped++;
 	}
-	info->fgfn++;
+
+	/*
+	 * Note: The hypercall will return 0 in most of the case if even if
+	 * all the fgmfn are not mapped. We still have to update the pte
+	 * as the userspace may decide to continue.
+	 */
+	if (!rc)
+		set_pte_at(info->vma->vm_mm, addr, ptep, pte);
 
 	return 0;
 }
@@ -102,13 +146,14 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 {
 	int err;
 	struct remap_data data;
-	unsigned long range = nr << PAGE_SHIFT;
+	unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;
 
 	/* Kept here for the purpose of making sure code doesn't break
 	   x86 PVOPS */
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
 	data.fgfn = gfn;
+	data.nr_fgfn = nr;
 	data.prot = prot;
 	data.domid = domid;
 	data.vma = vma;
@@ -123,21 +168,20 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
 
-int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
-			      int nr, struct page **pages)
+static void unmap_gfn(unsigned long gfn, void *data)
 {
-	int i;
+	struct xen_remove_from_physmap xrp;
 
-	for (i = 0; i < nr; i++) {
-		struct xen_remove_from_physmap xrp;
-		unsigned long pfn;
+	xrp.domid = DOMID_SELF;
+	xrp.gpfn = gfn;
+	(void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+}
 
-		pfn = page_to_pfn(pages[i]);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+			      int nr, struct page **pages)
+{
+	xen_for_each_gfn(pages, nr, unmap_gfn, NULL);
 
-		xrp.domid = DOMID_SELF;
-		xrp.gpfn = pfn;
-		(void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
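As a reading aid for the xlate_mmu.c rework above, here is a standalone sketch (again illustrative C, not kernel code; XEN_PFN_PER_PAGE is assumed to be 16, i.e. a 64KB Linux page split into 4KB Xen frames) of the index arithmetic xen_for_each_gfn performs: frame i is backed by Linux page i / XEN_PFN_PER_PAGE, at offset i % XEN_PFN_PER_PAGE within it.

    #include <stdio.h>

    #define XEN_PFN_PER_PAGE 16UL   /* assumed: 64KB Linux page / 4KB Xen frame */

    int main(void)
    {
            unsigned long nr_gfn = 40;  /* e.g. a 40-frame batch */
            unsigned long i, last;

            for (i = 0; i < nr_gfn; i += XEN_PFN_PER_PAGE) {
                    /* Frames i .. i + XEN_PFN_PER_PAGE - 1 share one Linux page */
                    last = i + XEN_PFN_PER_PAGE - 1;
                    if (last > nr_gfn - 1)
                            last = nr_gfn - 1;
                    printf("Linux page %lu backs Xen frames %lu..%lu\n",
                           i / XEN_PFN_PER_PAGE, i, last);
            }
            return 0;
    }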