Commit 77be36de authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'stable/for-linus-3.9-rc0-tag' of...

Merge tag 'stable/for-linus-3.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen update from Konrad Rzeszutek Wilk:
 "This has two new ACPI drivers for Xen - a physical CPU offline/online
  and a memory hotplug.  The way this works is that ACPI kicks the
  drivers and they make the appropriate hypercall to the hypervisor to
  tell it that there is a new CPU or memory.  There are also some changes
  to the Xen ARM ABIs and a couple of fixes.  One particularly nasty bug
  in the Xen PV spinlock code was fixed by Stefan Bader - and has been
  there since 2.6.32!

  Features:
   - Xen ACPI memory and CPU hotplug drivers - allowing Xen hypervisor
     to be aware of new CPU and new DIMMs
   - Cleanups
  Bug-fixes:
   - Fixes a long-standing bug in the PV spinlock wherein we did not
     kick VCPUs that were in a tight loop.
   - Fixes in the error paths for the event channel machinery"

Fix up a few semantic conflicts with the ACPI interface changes in
drivers/xen/xen-acpi-{cpu,mem}hotplug.c.

* tag 'stable/for-linus-3.9-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: event channel arrays are xen_ulong_t and not unsigned long
  xen: Send spinlock IPI to all waiters
  xen: introduce xen_remap, use it instead of ioremap
  xen: close evtchn port if binding to irq fails
  xen-evtchn: correct comment and error output
  xen/tmem: Add missing %s in the printk statement.
  xen/acpi: move xen_acpi_get_pxm under CONFIG_XEN_DOM0
  xen/acpi: ACPI cpu hotplug
  xen/acpi: Move xen_acpi_get_pxm to Xen's acpi.h
  xen/stub: driver for CPU hotplug
  xen/acpi: ACPI memory hotplug
  xen/stub: driver for memory hotplug
  xen: implement updated XENMEM_add_to_physmap_range ABI
  xen/smp: Move the common CPU init code a bit to prep for PVH patch.
parents 89f88337 c81611c4
...@@ -15,4 +15,26 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) ...@@ -15,4 +15,26 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr); return raw_irqs_disabled_flags(regs->ARM_cpsr);
} }
/*
 * Atomically exchange the 8-byte value at *ptr with val, returning the
 * previous contents.
 *
 * We cannot use xchg because it does not support 8-byte
 * values. However it is safe to use {ldr,str}exd directly because all
 * platforms which Xen can run on support those instructions.
 */
static inline xen_ulong_t xchg_xen_ulong(xen_ulong_t *ptr, xen_ulong_t val)
{
	xen_ulong_t oldval;
	unsigned int tmp;

	/* Ensure prior stores are visible before publishing the new value. */
	wmb();
	/* ldrexd/strexd retry loop: %1 (tmp) is the strexd status flag. */
	asm volatile("@ xchg_xen_ulong\n"
		"1: ldrexd %0, %H0, [%3]\n"
		" strexd %1, %2, %H2, [%3]\n"
		" teq %1, #0\n"
		" bne 1b"
		: "=&r" (oldval), "=&r" (tmp)
		: "r" (val), "r" (ptr)
		: "memory", "cc");
	return oldval;
}
#endif /* _ASM_ARM_XEN_EVENTS_H */ #endif /* _ASM_ARM_XEN_EVENTS_H */
#ifndef _ASM_ARM_XEN_PAGE_H #ifndef _ASM_ARM_XEN_PAGE_H
#define _ASM_ARM_XEN_PAGE_H #define _ASM_ARM_XEN_PAGE_H
#include <asm/mach/map.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -86,4 +87,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) ...@@ -86,4 +87,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{ {
return __set_phys_to_machine(pfn, mfn); return __set_phys_to_machine(pfn, mfn);
} }
#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY);
#endif /* _ASM_ARM_XEN_PAGE_H */ #endif /* _ASM_ARM_XEN_PAGE_H */
...@@ -59,14 +59,16 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, ...@@ -59,14 +59,16 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
}; };
xen_ulong_t idx = fgmfn; xen_ulong_t idx = fgmfn;
xen_pfn_t gpfn = lpfn; xen_pfn_t gpfn = lpfn;
int err = 0;
set_xen_guest_handle(xatp.idxs, &idx); set_xen_guest_handle(xatp.idxs, &idx);
set_xen_guest_handle(xatp.gpfns, &gpfn); set_xen_guest_handle(xatp.gpfns, &gpfn);
set_xen_guest_handle(xatp.errs, &err);
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
if (rc) { if (rc || err) {
pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n", pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
rc, lpfn, fgmfn); rc, err, lpfn, fgmfn);
return 1; return 1;
} }
return 0; return 0;
......
...@@ -16,4 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) ...@@ -16,4 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->flags); return raw_irqs_disabled_flags(regs->flags);
} }
/*
 * Atomically exchange *(ptr) with val, returning the previous value.
 * No need for a barrier -- XCHG is a barrier on x86.
 */
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
#endif /* _ASM_X86_XEN_EVENTS_H */ #endif /* _ASM_X86_XEN_EVENTS_H */
...@@ -212,4 +212,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr); ...@@ -212,4 +212,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr); void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr); void make_lowmem_page_readwrite(void *vaddr);
#define xen_remap(cookie, size) ioremap((cookie), (size));
#endif /* _ASM_X86_XEN_PAGE_H */ #endif /* _ASM_X86_XEN_PAGE_H */
...@@ -300,8 +300,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -300,8 +300,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
gdt = get_cpu_gdt_table(cpu); gdt = get_cpu_gdt_table(cpu);
ctxt->flags = VGCF_IN_KERNEL; ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.ds = __USER_DS;
ctxt->user_regs.es = __USER_DS;
ctxt->user_regs.ss = __KERNEL_DS; ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
ctxt->user_regs.fs = __KERNEL_PERCPU; ctxt->user_regs.fs = __KERNEL_PERCPU;
...@@ -310,10 +308,14 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -310,10 +308,14 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->gs_base_kernel = per_cpu_offset(cpu); ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif #endif
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
{
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
ctxt->user_regs.es = __USER_DS;
xen_copy_trap_info(ctxt->trap_ctxt); xen_copy_trap_info(ctxt->trap_ctxt);
ctxt->ldt_ents = 0; ctxt->ldt_ents = 0;
...@@ -327,9 +329,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -327,9 +329,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->gdt_frames[0] = gdt_mfn; ctxt->gdt_frames[0] = gdt_mfn;
ctxt->gdt_ents = GDT_ENTRIES; ctxt->gdt_ents = GDT_ENTRIES;
ctxt->user_regs.cs = __KERNEL_CS;
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
ctxt->kernel_ss = __KERNEL_DS; ctxt->kernel_ss = __KERNEL_DS;
ctxt->kernel_sp = idle->thread.sp0; ctxt->kernel_sp = idle->thread.sp0;
...@@ -337,8 +336,13 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -337,8 +336,13 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->event_callback_cs = __KERNEL_CS; ctxt->event_callback_cs = __KERNEL_CS;
ctxt->failsafe_callback_cs = __KERNEL_CS; ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif #endif
ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback; ctxt->event_callback_eip =
ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback; (unsigned long)xen_hypervisor_callback;
ctxt->failsafe_callback_eip =
(unsigned long)xen_failsafe_callback;
}
ctxt->user_regs.cs = __KERNEL_CS;
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
......
...@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) ...@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
if (per_cpu(lock_spinners, cpu) == xl) { if (per_cpu(lock_spinners, cpu) == xl) {
ADD_STATS(released_slow_kicked, 1); ADD_STATS(released_slow_kicked, 1);
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
break;
} }
} }
} }
......
...@@ -230,7 +230,7 @@ static int xen_hvm_console_init(void) ...@@ -230,7 +230,7 @@ static int xen_hvm_console_init(void)
if (r < 0 || v == 0) if (r < 0 || v == 0)
goto err; goto err;
mfn = v; mfn = v;
info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
if (info->intf == NULL) if (info->intf == NULL)
goto err; goto err;
info->vtermno = HVC_COOKIE; info->vtermno = HVC_COOKIE;
......
...@@ -180,6 +180,40 @@ config XEN_PRIVCMD ...@@ -180,6 +180,40 @@ config XEN_PRIVCMD
depends on XEN depends on XEN
default m default m
config XEN_STUB
bool "Xen stub drivers"
depends on XEN && X86_64
default n
help
Allow the kernel to install stub drivers, to reserve space for Xen drivers,
i.e. memory hotplug and cpu hotplug, and to block the native drivers from
being loaded, so that the real Xen drivers can be modular.
To enable Xen features like cpu and memory hotplug, select Y here.
config XEN_ACPI_HOTPLUG_MEMORY
tristate "Xen ACPI memory hotplug"
depends on XEN_DOM0 && XEN_STUB && ACPI
default n
help
This is Xen ACPI memory hotplug.
Currently Xen only supports ACPI memory hot-add. If you want
to hot-add memory at runtime (the hot-added memory cannot be
removed until the machine is stopped), select Y/M here; otherwise select N.
config XEN_ACPI_HOTPLUG_CPU
tristate "Xen ACPI cpu hotplug"
depends on XEN_DOM0 && XEN_STUB && ACPI
select ACPI_CONTAINER
default n
help
Xen ACPI cpu enumerating and hotplugging
For hotplugging, currently Xen only supports ACPI cpu hot-add.
If you want to hot-add a cpu at runtime (the hot-added cpu cannot
be removed until the machine is stopped), select Y/M here.
config XEN_ACPI_PROCESSOR config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor" tristate "Xen ACPI processor"
depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
......
...@@ -30,6 +30,9 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o ...@@ -30,6 +30,9 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_STUB) += xen-stub.o
obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o
obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
xen-evtchn-y := evtchn.o xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o xen-gntdev-y := gntdev.o
......
This diff is collapsed.
...@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) ...@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
u->name, (void *)(unsigned long)port); u->name, (void *)(unsigned long)port);
if (rc >= 0) if (rc >= 0)
rc = evtchn_make_refcounted(port); rc = evtchn_make_refcounted(port);
else {
/* bind failed, should close the port now */
struct evtchn_close close;
close.port = port;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
set_port_user(port, NULL);
}
return rc; return rc;
} }
...@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) ...@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{ {
int irq = irq_from_evtchn(port); int irq = irq_from_evtchn(port);
BUG_ON(irq < 0);
unbind_from_irqhandler(irq, (void *)(unsigned long)port); unbind_from_irqhandler(irq, (void *)(unsigned long)port);
set_port_user(port, NULL); set_port_user(port, NULL);
...@@ -534,10 +544,10 @@ static int __init evtchn_init(void) ...@@ -534,10 +544,10 @@ static int __init evtchn_init(void)
spin_lock_init(&port_user_lock); spin_lock_init(&port_user_lock);
/* Create '/dev/misc/evtchn'. */ /* Create '/dev/xen/evtchn'. */
err = misc_register(&evtchn_miscdev); err = misc_register(&evtchn_miscdev);
if (err != 0) { if (err != 0) {
printk(KERN_ALERT "Could not register /dev/misc/evtchn\n"); printk(KERN_ERR "Could not register /dev/xen/evtchn\n");
return err; return err;
} }
......
...@@ -1147,7 +1147,7 @@ static int gnttab_setup(void) ...@@ -1147,7 +1147,7 @@ static int gnttab_setup(void)
return gnttab_map(0, nr_grant_frames - 1); return gnttab_map(0, nr_grant_frames - 1);
if (gnttab_shared.addr == NULL) { if (gnttab_shared.addr == NULL) {
gnttab_shared.addr = ioremap(xen_hvm_resume_frames, gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
PAGE_SIZE * max_nr_gframes); PAGE_SIZE * max_nr_gframes);
if (gnttab_shared.addr == NULL) { if (gnttab_shared.addr == NULL) {
printk(KERN_WARNING printk(KERN_WARNING
......
...@@ -332,6 +332,41 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id) ...@@ -332,6 +332,41 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/*
 * Resync the physical-CPU state with the Xen hypervisor after a CPU has
 * been hot-added.  The actual work is deferred to process context via
 * the shared workqueue (xen_pcpu_work is defined elsewhere in this file).
 */
void xen_pcpu_hotplug_sync(void)
{
	schedule_work(&xen_pcpu_work);
}
EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);
/*
 * Map an ACPI processor id onto the hypervisor's logical cpu id.
 *
 * Returns the logical cpu id when the hypervisor presents a pcpu whose
 * acpi_id matches, or -ENODEV when no present pcpu matches @acpi_id.
 */
int xen_pcpu_id(uint32_t acpi_id)
{
	struct xen_platform_op op;
	int id, highest = 0;

	op.cmd = XENPF_get_cpuinfo;
	for (id = 0; id <= highest; id++) {
		op.u.pcpu_info.xen_cpuid = id;
		/* Slots the hypervisor cannot report are simply skipped. */
		if (HYPERVISOR_dom0_op(&op) != 0)
			continue;
		if (op.u.pcpu_info.acpi_id == acpi_id)
			return id;
		/* Widen the scan as we learn the highest present cpu id. */
		if (highest < op.u.pcpu_info.max_present)
			highest = op.u.pcpu_info.max_present;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(xen_pcpu_id);
static int __init xen_pcpu_init(void) static int __init xen_pcpu_init(void)
{ {
int irq, ret; int irq, ret;
......
...@@ -385,7 +385,7 @@ static int __init xen_tmem_init(void) ...@@ -385,7 +385,7 @@ static int __init xen_tmem_init(void)
if (old_ops.init != NULL) if (old_ops.init != NULL)
s = " (WARNING: frontswap_ops overridden)"; s = " (WARNING: frontswap_ops overridden)";
printk(KERN_INFO "frontswap enabled, RAM provided by " printk(KERN_INFO "frontswap enabled, RAM provided by "
"Xen Transcendent Memory\n"); "Xen Transcendent Memory%s\n", s);
} }
#endif #endif
#ifdef CONFIG_CLEANCACHE #ifdef CONFIG_CLEANCACHE
......
This diff is collapsed.
This diff is collapsed.
/*
* xen-stub.c - stub drivers to reserve space for Xen
*
* Copyright (C) 2012 Intel Corporation
* Author: Liu Jinsong <jinsong.liu@intel.com>
* Author: Jiang Yunhong <yunhong.jiang@intel.com>
*
* Copyright (C) 2012 Oracle Inc
* Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
#include <xen/acpi.h>
#ifdef CONFIG_ACPI
/*--------------------------------------------
	stub driver for Xen memory hotplug
 --------------------------------------------*/

/* ACPI IDs claimed by the stub; the empty entry terminates the table. */
static const struct acpi_device_id memory_device_ids[] = {
	{ACPI_MEMORY_DEVICE_HID, 0},
	{"", 0},
};

/*
 * Placeholder driver with no callbacks: registering it occupies the
 * native driver's name/class so the native memory hotplug driver
 * cannot bind (see .name comment below).
 */
static struct acpi_driver xen_stub_memory_device_driver = {
	/* same name as native memory driver to block native loaded */
	.name = "acpi_memhotplug",
	.class = ACPI_MEMORY_DEVICE_CLASS,
	.ids = memory_device_ids,
};
/*
 * Register the memory-hotplug stub driver.
 *
 * Returns 0 on success, -ENODEV when not running as the initial
 * domain, or the error from acpi_bus_register_driver().
 */
int xen_stub_memory_device_init(void)
{
	/* The stub is only meaningful in the initial domain. */
	if (xen_initial_domain())
		/* just reserve space for Xen, block native driver loaded */
		return acpi_bus_register_driver(&xen_stub_memory_device_driver);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
subsys_initcall(xen_stub_memory_device_init);
/*
 * Unregister the memory stub so a real (modular) Xen driver can take
 * over the reserved slot.
 */
void xen_stub_memory_device_exit(void)
{
	acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
/*--------------------------------------------
	stub driver for Xen cpu hotplug
 --------------------------------------------*/

/* ACPI IDs claimed by the stub; the empty entry terminates the table. */
static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, 0},
	{ACPI_PROCESSOR_DEVICE_HID, 0},
	{"", 0},
};

/*
 * Placeholder driver with no callbacks: registering it occupies the
 * native driver's name/class so the native processor driver cannot
 * bind (see .name comment below).
 */
static struct acpi_driver xen_stub_processor_driver = {
	/* same name as native processor driver to block native loaded */
	.name = "processor",
	.class = ACPI_PROCESSOR_CLASS,
	.ids = processor_device_ids,
};
/*
 * Register the cpu-hotplug stub driver.
 *
 * Returns 0 on success, -ENODEV when not running as the initial
 * domain, or the error from acpi_bus_register_driver().
 */
int xen_stub_processor_init(void)
{
	/* The stub is only meaningful in the initial domain. */
	if (xen_initial_domain())
		/* just reserve space for Xen, block native driver loaded */
		return acpi_bus_register_driver(&xen_stub_processor_driver);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(xen_stub_processor_init);
subsys_initcall(xen_stub_processor_init);
/*
 * Unregister the processor stub so a real (modular) Xen driver can
 * take over the reserved slot.
 */
void xen_stub_processor_exit(void)
{
	acpi_bus_unregister_driver(&xen_stub_processor_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
#endif
...@@ -769,7 +769,7 @@ static int __init xenbus_init(void) ...@@ -769,7 +769,7 @@ static int __init xenbus_init(void)
goto out_error; goto out_error;
xen_store_mfn = (unsigned long)v; xen_store_mfn = (unsigned long)v;
xen_store_interface = xen_store_interface =
ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
break; break;
default: default:
pr_warn("Xenstore state unknown\n"); pr_warn("Xenstore state unknown\n");
......
...@@ -40,6 +40,41 @@ ...@@ -40,6 +40,41 @@
#include <xen/xen.h> #include <xen/xen.h>
#include <linux/acpi.h> #include <linux/acpi.h>
/* ACPI identifiers for the memory hotplug device handled by Xen. */
#define ACPI_MEMORY_DEVICE_CLASS "memory"
#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"

/* Stub driver hooks (implemented in drivers/xen/xen-stub.c). */
int xen_stub_memory_device_init(void);
void xen_stub_memory_device_exit(void);

/* ACPI identifiers for the processor device handled by Xen. */
#define ACPI_PROCESSOR_CLASS "processor"
#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
#define ACPI_PROCESSOR_DEVICE_NAME "Processor"

/* Stub driver hooks (implemented in drivers/xen/xen-stub.c). */
int xen_stub_processor_init(void);
void xen_stub_processor_exit(void);

/* Physical-CPU helpers (implemented in drivers/xen/pcpu.c). */
void xen_pcpu_hotplug_sync(void);
int xen_pcpu_id(uint32_t acpi_id);
/*
 * Find the ACPI proximity domain (_PXM) for a handle, walking up the
 * namespace until an ancestor provides one.
 *
 * Returns the _PXM value, or -ENXIO when no ancestor defines _PXM.
 */
static inline int xen_acpi_get_pxm(acpi_handle h)
{
	acpi_handle node = h;
	acpi_handle parent;

	for (;;) {
		unsigned long long pxm;

		if (ACPI_SUCCESS(acpi_evaluate_integer(node, "_PXM",
						       NULL, &pxm)))
			return pxm;
		/* No _PXM here -- try the parent, stop at the root. */
		if (!ACPI_SUCCESS(acpi_get_parent(node, &parent)))
			break;
		node = parent;
	}

	return -ENXIO;
}
int xen_acpi_notify_hypervisor_state(u8 sleep_state, int xen_acpi_notify_hypervisor_state(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnd); u32 pm1a_cnt, u32 pm1b_cnd);
......
...@@ -190,6 +190,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap); ...@@ -190,6 +190,7 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
#define XENMEM_add_to_physmap_range 23 #define XENMEM_add_to_physmap_range 23
struct xen_add_to_physmap_range { struct xen_add_to_physmap_range {
/* IN */
/* Which domain to change the mapping for. */ /* Which domain to change the mapping for. */
domid_t domid; domid_t domid;
uint16_t space; /* => enum phys_map_space */ uint16_t space; /* => enum phys_map_space */
...@@ -203,6 +204,11 @@ struct xen_add_to_physmap_range { ...@@ -203,6 +204,11 @@ struct xen_add_to_physmap_range {
/* GPFN in domid where the source mapping page should appear. */ /* GPFN in domid where the source mapping page should appear. */
GUEST_HANDLE(xen_pfn_t) gpfns; GUEST_HANDLE(xen_pfn_t) gpfns;
/* OUT */
/* Per index error code. */
GUEST_HANDLE(int) errs;
}; };
DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range); DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range);
......
...@@ -324,10 +324,21 @@ struct xenpf_cpu_ol { ...@@ -324,10 +324,21 @@ struct xenpf_cpu_ol {
}; };
DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol); DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
/* #define XENPF_cpu_hotadd 58
* CMD 58 and 59 are reserved for cpu hotadd and memory hotadd, struct xenpf_cpu_hotadd {
* which are already occupied at Xen hypervisor side. uint32_t apic_id;
*/ uint32_t acpi_id;
uint32_t pxm;
};
#define XENPF_mem_hotadd 59
/* Notify the hypervisor of a hot-added memory range. */
struct xenpf_mem_hotadd {
	uint64_t spfn; /* presumably start pfn of the range -- TODO confirm */
	uint64_t epfn; /* presumably end pfn of the range -- TODO confirm */
	uint32_t pxm; /* NOTE(review): looks like an ACPI proximity domain -- confirm */
	uint32_t flags;
};
#define XENPF_core_parking 60 #define XENPF_core_parking 60
struct xenpf_core_parking { struct xenpf_core_parking {
/* IN variables */ /* IN variables */
...@@ -357,6 +368,8 @@ struct xen_platform_op { ...@@ -357,6 +368,8 @@ struct xen_platform_op {
struct xenpf_set_processor_pminfo set_pminfo; struct xenpf_set_processor_pminfo set_pminfo;
struct xenpf_pcpuinfo pcpu_info; struct xenpf_pcpuinfo pcpu_info;
struct xenpf_cpu_ol cpu_ol; struct xenpf_cpu_ol cpu_ol;
struct xenpf_cpu_hotadd cpu_add;
struct xenpf_mem_hotadd mem_add;
struct xenpf_core_parking core_parking; struct xenpf_core_parking core_parking;
uint8_t pad[128]; uint8_t pad[128];
} u; } u;
......
...@@ -285,7 +285,7 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); ...@@ -285,7 +285,7 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
* Event channel endpoints per domain: * Event channel endpoints per domain:
* 1024 if a long is 32 bits; 4096 if a long is 64 bits. * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
*/ */
#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64) #define NR_EVENT_CHANNELS (sizeof(xen_ulong_t) * sizeof(xen_ulong_t) * 64)
struct vcpu_time_info { struct vcpu_time_info {
/* /*
...@@ -341,7 +341,7 @@ struct vcpu_info { ...@@ -341,7 +341,7 @@ struct vcpu_info {
*/ */
uint8_t evtchn_upcall_pending; uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask; uint8_t evtchn_upcall_mask;
unsigned long evtchn_pending_sel; xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch; struct arch_vcpu_info arch;
struct pvclock_vcpu_time_info time; struct pvclock_vcpu_time_info time;
}; /* 64 bytes (x86) */ }; /* 64 bytes (x86) */
...@@ -384,8 +384,8 @@ struct shared_info { ...@@ -384,8 +384,8 @@ struct shared_info {
* per-vcpu selector word to be set. Each bit in the selector covers a * per-vcpu selector word to be set. Each bit in the selector covers a
* 'C long' in the PENDING bitfield array. * 'C long' in the PENDING bitfield array.
*/ */
unsigned long evtchn_pending[sizeof(unsigned long) * 8]; xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8];
unsigned long evtchn_mask[sizeof(unsigned long) * 8]; xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8];
/* /*
* Wallclock time: updated only by control software. Guests should base * Wallclock time: updated only by control software. Guests should base
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment