Commit 6a7ed405 authored by Konrad Rzeszutek Wilk

Merge branch 'arm-privcmd-for-3.8' of git://xenbits.xen.org/people/ianc/linux into stable/for-linus-3.8

* 'arm-privcmd-for-3.8' of git://xenbits.xen.org/people/ianc/linux:
  xen: arm: implement remap interfaces needed for privcmd mappings.
  xen: correctly use xen_pfn_t in remap_domain_mfn_range.
  xen: arm: enable balloon driver
  xen: balloon: allow PVMMU interfaces to be compiled out
  xen: privcmd: support autotranslated physmap guests.
  xen: add pages parameter to xen_remap_domain_mfn_range
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parents a7be94ac f832da06
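
The user-visible result of the series is that xen_remap_domain_mfn_range() gains a pages parameter (NULL on PV x86, ballooned pages on auto-translated guests) and a matching xen_unmap_domain_mfn_range() appears. A minimal sketch of a caller, assuming a privileged domain; the helper name and the vma/mfn/domid values are hypothetical and not part of this commit:

#include <linux/mm.h>
#include <xen/balloon.h>
#include <xen/xen-ops.h>

/* Hypothetical helper: map one foreign frame into @vma and tear the
 * mapping down again. On auto-translated guests the mapping must be
 * backed by a ballooned-out page; on PV x86 the pages argument may be
 * NULL. Note the ARM implementation below rejects nr > 1 for now.
 */
static int demo_map_one(struct vm_area_struct *vma, xen_pfn_t mfn,
                        unsigned int domid)
{
        struct page *page;
        int rc;

        rc = alloc_xenballooned_pages(1, &page, false);
        if (rc)
                return rc;

        rc = xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, 1,
                                        vma->vm_page_prot, domid, &page);
        if (rc < 0)
                goto out;

        rc = xen_unmap_domain_mfn_range(vma, 1, &page);
out:
        free_xenballooned_pages(1, &page);
        return rc;
}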
@@ -49,6 +49,7 @@ DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
 DEFINE_GUEST_HANDLE(uint32_t);
 DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
 
 /* Maximum number of virtual CPUs in multi-processor guests. */
 #define MAX_VIRT_CPUS 1
@@ -8,6 +8,8 @@
 #include <xen/features.h>
 #include <xen/platform_pci.h>
 #include <xen/xenbus.h>
+#include <xen/page.h>
+#include <xen/xen-ops.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include <linux/interrupt.h>
@@ -17,6 +19,8 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
+#include <linux/mm.h>
 
 struct start_info _xen_start_info;
 struct start_info *xen_start_info = &_xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
@@ -29,6 +33,10 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
 
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
+/* These are unused until we support booting "pre-ballooned" */
+unsigned long xen_released_pages;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
 /* TODO: to be removed */
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
@@ -38,15 +46,106 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
 
 static __read_mostly int xen_events_irq = -1;
+/* map fgmfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+                            unsigned int domid)
+{
+        int rc;
+        struct xen_add_to_physmap_range xatp = {
+                .domid = DOMID_SELF,
+                .foreign_domid = domid,
+                .size = 1,
+                .space = XENMAPSPACE_gmfn_foreign,
+        };
+        xen_ulong_t idx = fgmfn;
+        xen_pfn_t gpfn = lpfn;
+
+        set_xen_guest_handle(xatp.idxs, &idx);
+        set_xen_guest_handle(xatp.gpfns, &gpfn);
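+
+        /*
+         * A batch of one: idxs[0] holds the foreign gmfn, gpfns[0] the
+         * local pfn that should be backed by it.
+         */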
+        rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+        if (rc) {
+                pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n",
+                        rc, lpfn, fgmfn);
+                return 1;
+        }
+        return 0;
+}
+
+struct remap_data {
+        xen_pfn_t fgmfn; /* foreign domain's gmfn */
+        pgprot_t prot;
+        domid_t domid;
+        struct vm_area_struct *vma;
+        int index;
+        struct page **pages;
+        struct xen_remap_mfn_info *info;
+};
+
+static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+                        void *data)
+{
+        struct remap_data *info = data;
+        struct page *page = info->pages[info->index++];
+        unsigned long pfn = page_to_pfn(page);
+        pte_t pte = pfn_pte(pfn, info->prot);
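+
+        /*
+         * Two steps per page: point the local pfn at the foreign frame
+         * in the physmap first, then install the pte so the userspace
+         * va actually reaches it.
+         */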
+        if (map_foreign_page(pfn, info->fgmfn, info->domid))
+                return -EFAULT;
+        set_pte_at(info->vma->vm_mm, addr, ptep, pte);
+
+        return 0;
+}
+
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                                unsigned long addr,
-                               unsigned long mfn, int nr,
-                               pgprot_t prot, unsigned domid)
+                               xen_pfn_t mfn, int nr,
+                               pgprot_t prot, unsigned domid,
+                               struct page **pages)
 {
-        return -ENOSYS;
+        int err;
+        struct remap_data data;
+
+        /* TBD: Batching, current sole caller only does page at a time */
+        if (nr > 1)
+                return -EINVAL;
+
+        data.fgmfn = mfn;
+        data.prot = prot;
+        data.domid = domid;
+        data.vma = vma;
+        data.index = 0;
+        data.pages = pages;
+        err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+                                  remap_pte_fn, &data);
+        return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+                               int nr, struct page **pages)
+{
+        int i;
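+
+        /*
+         * Drop each foreign mapping from the physmap so the ballooned
+         * pages backing the vma can be handed back to the allocator.
+         */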
+        for (i = 0; i < nr; i++) {
+                struct xen_remove_from_physmap xrp;
+                unsigned long rc, pfn;
+
+                pfn = page_to_pfn(pages[i]);
+
+                xrp.domid = DOMID_SELF;
+                xrp.gpfn = pfn;
+                rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+                if (rc) {
+                        pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
+                                pfn, rc);
+                        return rc;
+                }
+        }
+        return 0;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
 /*
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
@@ -148,21 +247,3 @@ static int __init xen_init_events(void)
         return 0;
 }
 postcore_initcall(xen_init_events);
-
-/* XXX: only until balloon is properly working */
-int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
-{
-        *pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL,
-                             get_order(nr_pages));
-        if (*pages == NULL)
-                return -ENOMEM;
-        return 0;
-}
-EXPORT_SYMBOL_GPL(alloc_xenballooned_pages);
-
-void free_xenballooned_pages(int nr_pages, struct page **pages)
-{
-        kfree(*pages);
-        *pages = NULL;
-}
-EXPORT_SYMBOL_GPL(free_xenballooned_pages);
@@ -63,6 +63,7 @@ DEFINE_GUEST_HANDLE(void);
 DEFINE_GUEST_HANDLE(uint64_t);
 DEFINE_GUEST_HANDLE(uint32_t);
 DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
 #endif
 
 #ifndef HYPERVISOR_VIRT_START
@@ -6,6 +6,7 @@ config XEN
        bool "Xen guest support"
        select PARAVIRT
        select PARAVIRT_CLOCK
+       select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
        depends on X86_CMPXCHG && X86_TSC
        help
@@ -2478,8 +2478,10 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                                unsigned long addr,
-                               unsigned long mfn, int nr,
-                               pgprot_t prot, unsigned domid)
+                               xen_pfn_t mfn, int nr,
+                               pgprot_t prot, unsigned domid,
+                               struct page **pages)
 {
         struct remap_data rmd;
         struct mmu_update mmu_update[REMAP_BATCH_SIZE];
@@ -2523,3 +2525,14 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
         return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+
+/* Returns: 0 success */
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+                               int numpgs, struct page **pages)
+{
+        if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+                return 0;
+
+        return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
@@ -206,4 +206,7 @@ config XEN_MCE_LOG
          Allow kernel fetching MCE error from Xen platform and
          converting it into Linux mcelog format for mcelog tools
 
+config XEN_HAVE_PVMMU
+       bool
+
 endmenu
 ifneq ($(CONFIG_ARM),y)
-obj-y  += manage.o balloon.o
+obj-y  += manage.o
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu_hotplug.o
 endif
-obj-y  += grant-table.o features.o events.o
+obj-y  += grant-table.o features.o events.o balloon.o
 obj-y  += xenbus/
 
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -359,6 +359,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 
                 set_phys_to_machine(pfn, frame_list[i]);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
                 /* Link back into the page tables if not highmem. */
                 if (xen_pv_domain() && !PageHighMem(page)) {
                         int ret;
@@ -368,6 +369,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                                 0);
                         BUG_ON(ret);
                 }
+#endif
 
                 /* Relinquish the page back to the allocator. */
                 ClearPageReserved(page);
@@ -416,13 +418,14 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
                 scrub_page(page);
 
+#ifdef CONFIG_XEN_HAVE_PVMMU
                 if (xen_pv_domain() && !PageHighMem(page)) {
                         ret = HYPERVISOR_update_va_mapping(
                                 (unsigned long)__va(pfn << PAGE_SHIFT),
                                 __pte_ma(0), 0);
                         BUG_ON(ret);
                 }
+#endif
         }
 
         /* Ensure that ballooned highmem pages don't have kmaps. */
@@ -33,11 +33,14 @@
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
+#include <xen/balloon.h>
 
 #include "privcmd.h"
 
 MODULE_LICENSE("GPL");
 
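+/*
+ * Sentinel stored in vma->vm_private_data while a privcmd mapping is in
+ * flight; on auto-translated guests it is later swapped for the array of
+ * ballooned pages that back the mapping.
+ */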
+#define PRIV_VMA_LOCKED ((void *)1)
 
 #ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 #endif
 
@@ -178,7 +181,7 @@ static int mmap_mfn_range(void *data, void *state)
                         msg->va & PAGE_MASK,
                         msg->mfn, msg->npages,
                         vma->vm_page_prot,
-                        st->domain);
+                        st->domain, NULL);
         if (rc < 0)
                 return rc;
 
@@ -199,6 +202,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
         if (!xen_initial_domain())
                 return -EPERM;
 
+        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
+        if (xen_feature(XENFEAT_auto_translated_physmap))
+                return -ENOSYS;
+
         if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                 return -EFAULT;
 
@@ -246,6 +253,7 @@ struct mmap_batch_state {
         domid_t domain;
         unsigned long va;
         struct vm_area_struct *vma;
+        int index;
         /* A tristate:
          * 0 for no errors
          * 1 if at least one error has happened (and no
@@ -260,14 +268,24 @@ struct mmap_batch_state {
         xen_pfn_t __user *user_mfn;
 };
 
+/* auto translated dom0 note: if domU being created is PV, then mfn is
+ * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+ */
 static int mmap_batch_fn(void *data, void *state)
 {
         xen_pfn_t *mfnp = data;
         struct mmap_batch_state *st = state;
+        struct vm_area_struct *vma = st->vma;
+        struct page **pages = vma->vm_private_data;
+        struct page *cur_page = NULL;
         int ret;
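+
+        /*
+         * On auto-translated guests each foreign frame must be backed
+         * by one of the ballooned pages stashed in vm_private_data;
+         * hand the next one to the arch remap hook.
+         */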
+        if (xen_feature(XENFEAT_auto_translated_physmap))
+                cur_page = pages[st->index++];
+
         ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-                                         st->vma->vm_page_prot, st->domain);
+                                         st->vma->vm_page_prot, st->domain,
+                                         &cur_page);
 
         /* Store error code for second pass. */
         *(st->err++) = ret;
@@ -303,6 +321,32 @@ static int mmap_return_errors_v1(void *data, void *state)
         return __put_user(*mfnp, st->user_mfn++);
 }
 
+/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+ * the vma with the page info to use later.
+ * Returns: 0 if success, otherwise -errno
+ */
+static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
+{
+        int rc;
+        struct page **pages;
+
+        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+        if (pages == NULL)
+                return -ENOMEM;
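+
+        /*
+         * Ballooned pages have had their machine frames released to
+         * Xen, leaving pfns that are free to be remapped onto the
+         * foreign domain's frames.
+         */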
+        rc = alloc_xenballooned_pages(numpgs, pages, 0);
+        if (rc != 0) {
+                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
+                        numpgs, rc);
+                kfree(pages);
+                return -ENOMEM;
+        }
+        BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+        vma->vm_private_data = pages;
+        return 0;
+}
+
 static struct vm_operations_struct privcmd_vm_ops;
 
 static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
@@ -370,10 +414,18 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                 up_write(&mm->mmap_sem);
                 goto out;
         }
+        if (xen_feature(XENFEAT_auto_translated_physmap)) {
+                ret = alloc_empty_pages(vma, m.num);
+                if (ret < 0) {
+                        up_write(&mm->mmap_sem);
+                        goto out;
+                }
+        }
 
         state.domain = m.dom;
         state.vma = vma;
         state.va = m.addr;
+        state.index = 0;
         state.global_error = 0;
         state.err = err_array;
 
@@ -438,6 +490,19 @@ static long privcmd_ioctl(struct file *file,
         return ret;
 }
 
+static void privcmd_close(struct vm_area_struct *vma)
+{
+        struct page **pages = vma->vm_private_data;
+        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
+                return;
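+
+        /*
+         * Undo both halves of the mapping: detach the foreign frames
+         * from the physmap, then hand the ballooned pages back.
+         */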
+        xen_unmap_domain_mfn_range(vma, numpgs, pages);
+        free_xenballooned_pages(numpgs, pages);
+        kfree(pages);
+}
+
 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -448,6 +513,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 static struct vm_operations_struct privcmd_vm_ops = {
+        .close = privcmd_close,
         .fault = privcmd_fault
 };
 
@@ -465,7 +531,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 {
-        return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+        return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
 }
 
 const struct file_operations xen_privcmd_fops = {
@@ -153,6 +153,14 @@ struct xen_machphys_mapping {
 };
 DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t);
 
+#define XENMAPSPACE_shared_info  0 /* shared info page */
+#define XENMAPSPACE_grant_table  1 /* grant table page */
+#define XENMAPSPACE_gmfn         2 /* GMFN */
+#define XENMAPSPACE_gmfn_range   3 /* GMFN range, XENMEM_add_to_physmap only. */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
+                                    * XENMEM_add_to_physmap_range only.
+                                    */
+
 /*
  * Sets the GPFN at which a particular page appears in the specified guest's
  * pseudophysical address space.
@@ -167,8 +175,6 @@ struct xen_add_to_physmap {
         uint16_t size;
 
         /* Source mapping space. */
-#define XENMAPSPACE_shared_info 0 /* shared info page */
-#define XENMAPSPACE_grant_table 1 /* grant table page */
         unsigned int space;
 
         /* Index into source mapping space. */
@@ -182,6 +188,24 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
 /*** REMOVED ***/
 /*#define XENMEM_translate_gpfn_list 8*/
 
+#define XENMEM_add_to_physmap_range 23
+struct xen_add_to_physmap_range {
+        /* Which domain to change the mapping for. */
+        domid_t domid;
+        uint16_t space; /* => enum phys_map_space */
+
+        /* Number of pages to go through */
+        uint16_t size;
+        domid_t foreign_domid; /* IFF gmfn_foreign */
+
+        /* Indexes into space being mapped. */
+        GUEST_HANDLE(xen_ulong_t) idxs;
+
+        /* GPFN in domid where the source mapping page should appear. */
+        GUEST_HANDLE(xen_pfn_t) gpfns;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
+
 /*
  * Returns the pseudo-physical memory map as it was when the domain
  * was started (specified by XENMEM_set_memory_map).
@@ -217,4 +241,20 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
  * during a driver critical region.
  */
 extern spinlock_t xen_reservation_lock;
+
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap 15
+struct xen_remove_from_physmap {
+        /* Which domain to change the mapping for. */
+        domid_t domid;
+
+        /* GPFN of the current mapping of the page. */
+        xen_pfn_t gpfn;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap);
+
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
@@ -2,6 +2,7 @@
 #define INCLUDE_XEN_OPS_H
 
 #include <linux/percpu.h>
+#include <asm/xen/interface.h>
 
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 
@@ -26,8 +27,11 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
 struct vm_area_struct;
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                                unsigned long addr,
-                               unsigned long mfn, int nr,
-                               pgprot_t prot, unsigned domid);
+                               xen_pfn_t mfn, int nr,
+                               pgprot_t prot, unsigned domid,
+                               struct page **pages);
+int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+                               int numpgs, struct page **pages);
 
 bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
 
 #endif /* INCLUDE_XEN_OPS_H */