Commit 3ee31b89 authored by Linus Torvalds

Merge tag 'for-linus-4.14b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - the new pvcalls backend for routing socket calls from a guest to dom0

 - some cleanups of Xen code

 - a fix for wrong usage of {get,put}_cpu() (see the sketch below)

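For context on the {get,put}_cpu() item: get_cpu() returns the current CPU number and keeps preemption disabled until the matching put_cpu(), whereas smp_processor_id() only reads the CPU number. The events_fifo hunk further down swaps the former for the latter in the __init function xen_evtchn_fifo_init(), so the function body no longer runs with preemption disabled and the error path can simply return. A minimal sketch of the before/after pattern, assuming a made-up helper alloc_per_cpu_block() standing in for evtchn_fifo_alloc_control_block():

/* Illustrative sketch only; alloc_per_cpu_block() is invented. */
#include <linux/init.h>
#include <linux/smp.h>

static int alloc_per_cpu_block(unsigned int cpu)
{
        /* stand-in for evtchn_fifo_alloc_control_block(cpu) */
        return 0;
}

/* Before: preemption stays disabled across the whole init function. */
static int __init fifo_init_old(void)
{
        int cpu = get_cpu();            /* disables preemption */
        int ret = alloc_per_cpu_block(cpu);

        put_cpu();                      /* re-enables preemption */
        return ret;
}

/* After: only the CPU number is needed; no preemption juggling. */
static int __init fifo_init_new(void)
{
        return alloc_per_cpu_block(smp_processor_id());
}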
* tag 'for-linus-4.14b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (27 commits)
  xen/mmu: set MMU_NORMAL_PT_UPDATE in remap_area_mfn_pte_fn
  xen: Don't try to call xen_alloc_p2m_entry() on autotranslating guests
  xen/events: events_fifo: Don't use {get,put}_cpu() in xen_evtchn_fifo_init()
  xen/pvcalls: use WARN_ON(1) instead of __WARN()
  xen: remove not used trace functions
  xen: remove unused function xen_set_domain_pte()
  xen: remove tests for pvh mode in pure pv paths
  xen-platform: constify pci_device_id.
  xen: cleanup xen.h
  xen: introduce a Kconfig option to enable the pvcalls backend
  xen/pvcalls: implement write
  xen/pvcalls: implement read
  xen/pvcalls: implement the ioworker functions
  xen/pvcalls: disconnect and module_exit
  xen/pvcalls: implement release command
  xen/pvcalls: implement poll command
  xen/pvcalls: implement accept command
  xen/pvcalls: implement listen command
  xen/pvcalls: implement bind command
  xen/pvcalls: implement connect command
  ...
parents bac65d9d d785d9ec
@@ -158,9 +158,6 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
 	unsigned long pfn;
 	int ret;
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return mfn;
 	if (unlikely(mfn >= machine_to_phys_nr))
 		return ~0;
@@ -317,8 +314,6 @@ static inline pte_t __pte_ma(pteval_t x)
 #define p4d_val_ma(x) ((x).p4d)
 #endif
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
 xmaddr_t arbitrary_virt_to_machine(void *address);
 unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
...
@@ -84,7 +84,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	else
 		rmd->mfn++;
-	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
 	rmd->mmu_update->val = pte_val_ma(pte);
 	rmd->mmu_update++;
...
@@ -162,26 +162,6 @@ static bool xen_page_pinned(void *ptr)
 	return PagePinned(page);
 }
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
-{
-	struct multicall_space mcs;
-	struct mmu_update *u;
-	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
-	mcs = xen_mc_entry(sizeof(*u));
-	u = mcs.args;
-	/* ptep might be kmapped when using 32-bit HIGHPTE */
-	u->ptr = virt_to_machine(ptep).maddr;
-	u->val = pte_val_ma(pteval);
-	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
-	xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-EXPORT_SYMBOL_GPL(xen_set_domain_pte);
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
 	struct multicall_space mcs;
...
@@ -212,8 +212,7 @@ void __ref xen_build_mfn_list_list(void)
 	unsigned int level, topidx, mididx;
 	unsigned long *mid_mfn_p;
-	if (xen_feature(XENFEAT_auto_translated_physmap) ||
-	    xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
+	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
 		return;
 	/* Pre-initialize p2m_top_mfn to be completely missing */
@@ -269,9 +268,6 @@ void __ref xen_build_mfn_list_list(void)
 void xen_setup_mfn_list_list(void)
 {
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
@@ -291,9 +287,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
 {
 	unsigned long pfn;
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
 	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
 	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
@@ -540,9 +533,6 @@ int xen_alloc_p2m_entry(unsigned long pfn)
 	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
 	unsigned long p2m_pfn;
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
 	ptep = lookup_address(addr, &level);
 	BUG_ON(!ptep || level != PG_LEVEL_4K);
 	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
@@ -640,9 +630,6 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 	if (unlikely(pfn_s >= xen_p2m_size))
 		return 0;
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
-		return pfn_e - pfn_s;
 	if (pfn_s > pfn_e)
 		return 0;
@@ -660,10 +647,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	pte_t *ptep;
 	unsigned int level;
-	/* don't track P2M changes in autotranslate guests */
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
-		return true;
 	if (unlikely(pfn >= xen_p2m_size)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
@@ -711,9 +694,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 	int i, ret = 0;
 	pte_t *pte;
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
 	if (kmap_ops) {
 		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
 						kmap_ops, count);
@@ -756,9 +736,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 {
 	int i, ret = 0;
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return 0;
 	for (i = 0; i < count; i++) {
 		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
 		unsigned long pfn = page_to_pfn(pages[i]);
...
@@ -340,8 +340,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
 	WARN_ON(size == 0);
-	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
 	mfn_save = virt_to_mfn(buf);
 	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
@@ -1024,8 +1022,7 @@ void __init xen_pvmmu_arch_setup(void)
 void __init xen_arch_setup(void)
 {
 	xen_panic_handler_init();
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
-		xen_pvmmu_arch_setup();
+	xen_pvmmu_arch_setup();
 #ifdef CONFIG_ACPI
 	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
...
@@ -196,6 +196,18 @@ config XEN_PCIDEV_BACKEND
 	  If in doubt, say m.

+config XEN_PVCALLS_BACKEND
+	bool "XEN PV Calls backend driver"
+	depends on INET && XEN && XEN_BACKEND
+	default n
+	help
+	  Experimental backend for the Xen PV Calls protocol
+	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
+	  allows PV Calls frontends to send POSIX calls to the backend,
+	  which implements them.
+
+	  If in doubt, say n.
+
 config XEN_SCSI_BACKEND
 	tristate "XEN SCSI backend driver"
 	depends on XEN && XEN_BACKEND && TARGET_CORE
...
@@ -35,6 +35,7 @@ obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
 obj-$(CONFIG_XEN_EFI) += efi.o
 obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
 obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
+obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o
 xen-evtchn-y := evtchn.o
 xen-gntdev-y := gntdev.o
 xen-gntalloc-y := gntalloc.o
...
@@ -664,9 +664,11 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 			 */
 			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-			ret = xen_alloc_p2m_entry(page_to_pfn(page));
-			if (ret < 0)
-				goto out_undo;
+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+				ret = xen_alloc_p2m_entry(page_to_pfn(page));
+				if (ret < 0)
+					goto out_undo;
+			}
 #endif
 		} else {
 			ret = add_ballooned_pages(nr_pages - pgno);
...
@@ -432,12 +432,12 @@ static int xen_evtchn_cpu_dead(unsigned int cpu)
 int __init xen_evtchn_fifo_init(void)
 {
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 	int ret;
 	ret = evtchn_fifo_alloc_control_block(cpu);
 	if (ret < 0)
-		goto out;
+		return ret;
 	pr_info("Using FIFO-based ABI\n");
@@ -446,7 +446,6 @@ int __init xen_evtchn_fifo_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
 				  "xen/evtchn:prepare",
 				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
-out:
-	put_cpu();
 	return ret;
 }
@@ -175,7 +175,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
 	return ret;
 }
-static struct pci_device_id platform_pci_tbl[] = {
+static const struct pci_device_id platform_pci_tbl[] = {
 	{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0,}
...
This diff is collapsed.
@@ -149,24 +149,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
-TRACE_EVENT(xen_mmu_set_domain_pte,
-	    TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
-	    TP_ARGS(ptep, pteval, domid),
-	    TP_STRUCT__entry(
-		    __field(pte_t *, ptep)
-		    __field(pteval_t, pteval)
-		    __field(unsigned, domid)
-		    ),
-	    TP_fast_assign(__entry->ptep = ptep;
-			   __entry->pteval = pteval.pte;
-			   __entry->domid = domid),
-	    TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
-		      __entry->ptep,
-		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
-		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
-		      __entry->domid)
-	);
 TRACE_EVENT(xen_mmu_set_pte_at,
 	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pteval),
@@ -266,16 +248,6 @@ TRACE_EVENT(xen_mmu_set_p4d,
 		      (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
 		      (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
 	);
-TRACE_EVENT(xen_mmu_pud_clear,
-	    TP_PROTO(pud_t *pudp),
-	    TP_ARGS(pudp),
-	    TP_STRUCT__entry(
-		    __field(pud_t *, pudp)
-		    ),
-	    TP_fast_assign(__entry->pudp = pudp),
-	    TP_printk("pudp %p", __entry->pudp)
-	);
 #else
 TRACE_EVENT(xen_mmu_set_pud,
@@ -295,16 +267,6 @@ TRACE_EVENT(xen_mmu_set_pud,
 #endif
-TRACE_EVENT(xen_mmu_pgd_clear,
-	    TP_PROTO(pgd_t *pgdp),
-	    TP_ARGS(pgdp),
-	    TP_STRUCT__entry(
-		    __field(pgd_t *, pgdp)
-		    ),
-	    TP_fast_assign(__entry->pgdp = pgdp),
-	    TP_printk("pgdp %p", __entry->pgdp)
-	);
 DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
 	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pteval),
...
#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__

#include <linux/net.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/* "1" means socket, connect, release, bind, listen, accept and poll */
#define XENBUS_FUNCTIONS_CALLS "1"

/*
 * See docs/misc/pvcalls.markdown in xen.git for the full specification:
 * https://xenbits.xen.org/docs/unstable/misc/pvcalls.html
 */
struct pvcalls_data_intf {
	RING_IDX in_cons, in_prod, in_error;

	uint8_t pad1[52];

	RING_IDX out_cons, out_prod, out_error;

	uint8_t pad2[52];

	RING_IDX ring_order;
	grant_ref_t ref[];
};
DEFINE_XEN_FLEX_RING(pvcalls);

#define PVCALLS_SOCKET		0
#define PVCALLS_CONNECT		1
#define PVCALLS_RELEASE		2
#define PVCALLS_BIND		3
#define PVCALLS_LISTEN		4
#define PVCALLS_ACCEPT		5
#define PVCALLS_POLL		6

struct xen_pvcalls_request {
	uint32_t req_id; /* private to guest, echoed in response */
	uint32_t cmd;    /* command to execute */
	union {
		struct xen_pvcalls_socket {
			uint64_t id;
			uint32_t domain;
			uint32_t type;
			uint32_t protocol;
		} socket;
		struct xen_pvcalls_connect {
			uint64_t id;
			uint8_t addr[28];
			uint32_t len;
			uint32_t flags;
			grant_ref_t ref;
			uint32_t evtchn;
		} connect;
		struct xen_pvcalls_release {
			uint64_t id;
			uint8_t reuse;
		} release;
		struct xen_pvcalls_bind {
			uint64_t id;
			uint8_t addr[28];
			uint32_t len;
		} bind;
		struct xen_pvcalls_listen {
			uint64_t id;
			uint32_t backlog;
		} listen;
		struct xen_pvcalls_accept {
			uint64_t id;
			uint64_t id_new;
			grant_ref_t ref;
			uint32_t evtchn;
		} accept;
		struct xen_pvcalls_poll {
			uint64_t id;
		} poll;
		/* dummy member to force sizeof(struct xen_pvcalls_request)
		 * to match across archs */
		struct xen_pvcalls_dummy {
			uint8_t dummy[56];
		} dummy;
	} u;
};

struct xen_pvcalls_response {
	uint32_t req_id;
	uint32_t cmd;
	int32_t ret;
	uint32_t pad;
	union {
		struct _xen_pvcalls_socket {
			uint64_t id;
		} socket;
		struct _xen_pvcalls_connect {
			uint64_t id;
		} connect;
		struct _xen_pvcalls_release {
			uint64_t id;
		} release;
		struct _xen_pvcalls_bind {
			uint64_t id;
		} bind;
		struct _xen_pvcalls_listen {
			uint64_t id;
		} listen;
		struct _xen_pvcalls_accept {
			uint64_t id;
		} accept;
		struct _xen_pvcalls_poll {
			uint64_t id;
		} poll;
		struct _xen_pvcalls_dummy {
			uint8_t dummy[8];
		} dummy;
	} u;
};

DEFINE_RING_TYPES(xen_pvcalls, struct xen_pvcalls_request,
		  struct xen_pvcalls_response);

#endif
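To make the command layout above concrete, here is a minimal, hypothetical sketch of a frontend filling a PVCALLS_SOCKET request and checking the matching response. It uses only the structures and constants defined in this header; the helper names are invented, and the actual ring submission and event-channel signalling are omitted:

/* Illustrative sketch only; not part of the header above. */
#include <linux/socket.h>       /* AF_INET */
#include <linux/net.h>          /* SOCK_STREAM */
#include <linux/string.h>       /* memset() */

static void example_fill_socket_req(struct xen_pvcalls_request *req,
                                    uint32_t req_id, uint64_t sock_id)
{
        memset(req, 0, sizeof(*req));
        req->req_id = req_id;                   /* echoed back in the response */
        req->cmd = PVCALLS_SOCKET;
        req->u.socket.id = sock_id;             /* opaque id chosen by the frontend */
        req->u.socket.domain = AF_INET;         /* socket(domain, type, protocol) */
        req->u.socket.type = SOCK_STREAM;
        req->u.socket.protocol = 0;
}

static int example_check_socket_rsp(const struct xen_pvcalls_response *rsp,
                                    uint32_t req_id)
{
        if (rsp->req_id != req_id || rsp->cmd != PVCALLS_SOCKET)
                return -1;      /* not the reply for this request */
        return rsp->ret;        /* 0 on success, negative on failure */
}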
@@ -9,6 +9,8 @@
 #ifndef __XEN_PUBLIC_IO_RING_H__
 #define __XEN_PUBLIC_IO_RING_H__
+
+#include <xen/interface/grant_table.h>
 typedef unsigned int RING_IDX;
 /* Round a 32-bit unsigned constant down to the nearest power of two. */
...
@@ -13,11 +13,16 @@ extern enum xen_domain_type xen_domain_type;
 #define xen_domain_type XEN_NATIVE
 #endif
+#ifdef CONFIG_XEN_PVH
+extern bool xen_pvh;
+#else
+#define xen_pvh 0
+#endif
+
 #define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain() && \
-			 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain() (xen_domain() && \
-			 xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_pvh_domain() (xen_pvh)
 #ifdef CONFIG_XEN_DOM0
 #include <xen/interface/xen.h>
@@ -29,11 +34,4 @@ extern enum xen_domain_type xen_domain_type;
 #define xen_initial_domain() (0)
 #endif /* CONFIG_XEN_DOM0 */
-
-#ifdef CONFIG_XEN_PVH
-extern bool xen_pvh;
-#define xen_pvh_domain() (xen_hvm_domain() && xen_pvh)
-#else
-#define xen_pvh_domain() (0)
-#endif
 #endif /* _XEN_XEN_H */
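The xen.h cleanup above leaves three flat predicates, xen_pv_domain(), xen_hvm_domain() and xen_pvh_domain(), with the PVH check now keyed solely off xen_pvh rather than being layered on top of xen_hvm_domain(). A hedged sketch of how such predicates are typically consumed; the reporting function below is invented for illustration:

/* Illustrative sketch only. */
#include <linux/printk.h>
#include <xen/xen.h>

static void example_report_xen_mode(void)
{
        if (!xen_domain())
                return;                 /* bare metal or another hypervisor */

        if (xen_pv_domain())
                pr_info("Xen: classic PV guest\n");
        else if (xen_pvh_domain())
                pr_info("Xen: PVH guest\n");
        else if (xen_hvm_domain())
                pr_info("Xen: HVM guest\n");
}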