Commit 20ca57cd authored by Linus Torvalds

Merge tag 'stable/for-linus-3.18-b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Merge tag 'stable/for-linus-3.18-b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

 - Fix regression in xen_clocksource_read() which caused all Xen guests
   to crash early in boot.
 - Several fixes for super rare race conditions in the p2m.
 - Assorted other minor fixes.

* tag 'stable/for-linus-3.18-b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pci: Allocate memory for physdev_pci_device_add's optarr
  x86/xen: panic on bad Xen-provided memory map
  x86/xen: Fix incorrect per_cpu accessor in xen_clocksource_read()
  x86/xen: avoid race in p2m handling
  x86/xen: delay construction of mfn_list_list
  x86/xen: avoid writing to freed memory after race in p2m handling
  xen/balloon: Don't continue ballooning when BP_ECANCELED is encountered
parents c6d13403 486edb24
...@@ -1636,9 +1636,6 @@ asmlinkage __visible void __init xen_start_kernel(void) ...@@ -1636,9 +1636,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n"); xen_raw_console_write("mapping kernel into physical memory\n");
xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages); xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
/* keep using Xen gdt for now; no urgent need to change it */ /* keep using Xen gdt for now; no urgent need to change it */
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
......
...@@ -1217,10 +1217,13 @@ static void __init xen_pagetable_p2m_copy(void) ...@@ -1217,10 +1217,13 @@ static void __init xen_pagetable_p2m_copy(void)
static void __init xen_pagetable_init(void) static void __init xen_pagetable_init(void)
{ {
paging_init(); paging_init();
xen_setup_shared_info();
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
xen_pagetable_p2m_copy(); xen_pagetable_p2m_copy();
#endif #endif
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
xen_setup_shared_info();
xen_post_allocator_init(); xen_post_allocator_init();
} }
static void xen_write_cr2(unsigned long cr2) static void xen_write_cr2(unsigned long cr2)
......
...@@ -163,6 +163,7 @@ ...@@ -163,6 +163,7 @@
#include <linux/hash.h> #include <linux/hash.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/setup.h> #include <asm/setup.h>
...@@ -181,21 +182,20 @@ static void __init m2p_override_init(void); ...@@ -181,21 +182,20 @@ static void __init m2p_override_init(void);
unsigned long xen_max_p2m_pfn __read_mostly; unsigned long xen_max_p2m_pfn __read_mostly;
static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
/* Placeholders for holes in the address space */ /* Placeholders for holes in the address space */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
/* For each I/O range remapped we may lose up to two leaf pages for the boundary /* For each I/O range remapped we may lose up to two leaf pages for the boundary
* violations and three mid pages to cover up to 3GB. With * violations and three mid pages to cover up to 3GB. With
...@@ -272,11 +272,11 @@ static void p2m_init(unsigned long *p2m) ...@@ -272,11 +272,11 @@ static void p2m_init(unsigned long *p2m)
* Build the parallel p2m_top_mfn and p2m_mid_mfn structures * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
* *
* This is called both at boot time, and after resuming from suspend: * This is called both at boot time, and after resuming from suspend:
* - At boot time we're called very early, and must use extend_brk() * - At boot time we're called rather early, and must use alloc_bootmem*()
* to allocate memory. * to allocate memory.
* *
* - After resume we're called from within stop_machine, but the mfn * - After resume we're called from within stop_machine, but the mfn
* tree should alreay be completely allocated. * tree should already be completely allocated.
*/ */
void __ref xen_build_mfn_list_list(void) void __ref xen_build_mfn_list_list(void)
{ {
...@@ -287,20 +287,17 @@ void __ref xen_build_mfn_list_list(void) ...@@ -287,20 +287,17 @@ void __ref xen_build_mfn_list_list(void)
/* Pre-initialize p2m_top_mfn to be completely missing */ /* Pre-initialize p2m_top_mfn to be completely missing */
if (p2m_top_mfn == NULL) { if (p2m_top_mfn == NULL) {
p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
p2m_mid_identity_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity);
p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
p2m_top_mfn_p_init(p2m_top_mfn_p); p2m_top_mfn_p_init(p2m_top_mfn_p);
p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
p2m_top_mfn_init(p2m_top_mfn); p2m_top_mfn_init(p2m_top_mfn);
} else { } else {
/* Reinitialise, mfn's all change after migration */ /* Reinitialise, mfn's all change after migration */
p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity);
} }
for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
...@@ -328,10 +325,9 @@ void __ref xen_build_mfn_list_list(void) ...@@ -328,10 +325,9 @@ void __ref xen_build_mfn_list_list(void)
/* /*
* XXX boot-time only! We should never find * XXX boot-time only! We should never find
* missing parts of the mfn tree after * missing parts of the mfn tree after
* runtime. extend_brk() will BUG if we call * runtime.
* it too late.
*/ */
mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
p2m_mid_mfn_init(mid_mfn_p, p2m_missing); p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
p2m_top_mfn_p[topidx] = mid_mfn_p; p2m_top_mfn_p[topidx] = mid_mfn_p;
...@@ -415,7 +411,6 @@ void __init xen_build_dynamic_phys_to_machine(void) ...@@ -415,7 +411,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
m2p_override_init(); m2p_override_init();
} }
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
#include <linux/bootmem.h>
unsigned long __init xen_revector_p2m_tree(void) unsigned long __init xen_revector_p2m_tree(void)
{ {
unsigned long va_start; unsigned long va_start;
...@@ -477,7 +472,6 @@ unsigned long __init xen_revector_p2m_tree(void) ...@@ -477,7 +472,6 @@ unsigned long __init xen_revector_p2m_tree(void)
copy_page(new, mid_p); copy_page(new, mid_p);
p2m_top[topidx][mididx] = &mfn_list[pfn_free]; p2m_top[topidx][mididx] = &mfn_list[pfn_free];
p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]);
pfn_free += P2M_PER_PAGE; pfn_free += P2M_PER_PAGE;
...@@ -538,12 +532,13 @@ static bool alloc_p2m(unsigned long pfn) ...@@ -538,12 +532,13 @@ static bool alloc_p2m(unsigned long pfn)
unsigned topidx, mididx; unsigned topidx, mididx;
unsigned long ***top_p, **mid; unsigned long ***top_p, **mid;
unsigned long *top_mfn_p, *mid_mfn; unsigned long *top_mfn_p, *mid_mfn;
unsigned long *p2m_orig;
topidx = p2m_top_index(pfn); topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn); mididx = p2m_mid_index(pfn);
top_p = &p2m_top[topidx]; top_p = &p2m_top[topidx];
mid = *top_p; mid = ACCESS_ONCE(*top_p);
if (mid == p2m_mid_missing) { if (mid == p2m_mid_missing) {
/* Mid level is missing, allocate a new one */ /* Mid level is missing, allocate a new one */
...@@ -558,7 +553,7 @@ static bool alloc_p2m(unsigned long pfn) ...@@ -558,7 +553,7 @@ static bool alloc_p2m(unsigned long pfn)
} }
top_mfn_p = &p2m_top_mfn[topidx]; top_mfn_p = &p2m_top_mfn[topidx];
mid_mfn = p2m_top_mfn_p[topidx]; mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
...@@ -566,6 +561,7 @@ static bool alloc_p2m(unsigned long pfn) ...@@ -566,6 +561,7 @@ static bool alloc_p2m(unsigned long pfn)
/* Separately check the mid mfn level */ /* Separately check the mid mfn level */
unsigned long missing_mfn; unsigned long missing_mfn;
unsigned long mid_mfn_mfn; unsigned long mid_mfn_mfn;
unsigned long old_mfn;
mid_mfn = alloc_p2m_page(); mid_mfn = alloc_p2m_page();
if (!mid_mfn) if (!mid_mfn)
...@@ -575,17 +571,19 @@ static bool alloc_p2m(unsigned long pfn) ...@@ -575,17 +571,19 @@ static bool alloc_p2m(unsigned long pfn)
missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
mid_mfn_mfn = virt_to_mfn(mid_mfn); mid_mfn_mfn = virt_to_mfn(mid_mfn);
if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
if (old_mfn != missing_mfn) {
free_p2m_page(mid_mfn); free_p2m_page(mid_mfn);
else mid_mfn = mfn_to_virt(old_mfn);
} else {
p2m_top_mfn_p[topidx] = mid_mfn; p2m_top_mfn_p[topidx] = mid_mfn;
}
} }
if (p2m_top[topidx][mididx] == p2m_identity || p2m_orig = ACCESS_ONCE(p2m_top[topidx][mididx]);
p2m_top[topidx][mididx] == p2m_missing) { if (p2m_orig == p2m_identity || p2m_orig == p2m_missing) {
/* p2m leaf page is missing */ /* p2m leaf page is missing */
unsigned long *p2m; unsigned long *p2m;
unsigned long *p2m_orig = p2m_top[topidx][mididx];
p2m = alloc_p2m_page(); p2m = alloc_p2m_page();
if (!p2m) if (!p2m)
...@@ -606,7 +604,6 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary) ...@@ -606,7 +604,6 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
{ {
unsigned topidx, mididx, idx; unsigned topidx, mididx, idx;
unsigned long *p2m; unsigned long *p2m;
unsigned long *mid_mfn_p;
topidx = p2m_top_index(pfn); topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn); mididx = p2m_mid_index(pfn);
...@@ -633,43 +630,21 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary) ...@@ -633,43 +630,21 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
p2m_top[topidx][mididx] = p2m; p2m_top[topidx][mididx] = p2m;
/* For save/restore we need to MFN of the P2M saved */
mid_mfn_p = p2m_top_mfn_p[topidx];
WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
topidx, mididx);
mid_mfn_p[mididx] = virt_to_mfn(p2m);
return true; return true;
} }
static bool __init early_alloc_p2m_middle(unsigned long pfn) static bool __init early_alloc_p2m_middle(unsigned long pfn)
{ {
unsigned topidx = p2m_top_index(pfn); unsigned topidx = p2m_top_index(pfn);
unsigned long *mid_mfn_p;
unsigned long **mid; unsigned long **mid;
mid = p2m_top[topidx]; mid = p2m_top[topidx];
mid_mfn_p = p2m_top_mfn_p[topidx];
if (mid == p2m_mid_missing) { if (mid == p2m_mid_missing) {
mid = extend_brk(PAGE_SIZE, PAGE_SIZE); mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_mid_init(mid, p2m_missing); p2m_mid_init(mid, p2m_missing);
p2m_top[topidx] = mid; p2m_top[topidx] = mid;
BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
}
/* And the save/restore P2M tables.. */
if (mid_mfn_p == p2m_mid_missing_mfn) {
mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
p2m_top_mfn_p[topidx] = mid_mfn_p;
p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
/* Note: we don't set mid_mfn_p[midix] here,
* look in early_alloc_p2m() */
} }
return true; return true;
} }
...@@ -680,14 +655,13 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn) ...@@ -680,14 +655,13 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
* replace the P2M leaf with a p2m_missing or p2m_identity. * replace the P2M leaf with a p2m_missing or p2m_identity.
* Stick the old page in the new P2M tree location. * Stick the old page in the new P2M tree location.
*/ */
bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn)
{ {
unsigned topidx; unsigned topidx;
unsigned mididx; unsigned mididx;
unsigned ident_pfns; unsigned ident_pfns;
unsigned inv_pfns; unsigned inv_pfns;
unsigned long *p2m; unsigned long *p2m;
unsigned long *mid_mfn_p;
unsigned idx; unsigned idx;
unsigned long pfn; unsigned long pfn;
...@@ -733,11 +707,6 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_ ...@@ -733,11 +707,6 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
found: found:
/* Found one, replace old with p2m_identity or p2m_missing */ /* Found one, replace old with p2m_identity or p2m_missing */
p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
/* And the other for save/restore.. */
mid_mfn_p = p2m_top_mfn_p[topidx];
/* NOTE: Even if it is a p2m_identity it should still be point to
* a page filled with INVALID_P2M_ENTRY entries. */
mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
/* Reset where we want to stick the old page in. */ /* Reset where we want to stick the old page in. */
topidx = p2m_top_index(set_pfn); topidx = p2m_top_index(set_pfn);
...@@ -752,8 +721,6 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_ ...@@ -752,8 +721,6 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
p2m_init(p2m); p2m_init(p2m);
p2m_top[topidx][mididx] = p2m; p2m_top[topidx][mididx] = p2m;
mid_mfn_p = p2m_top_mfn_p[topidx];
mid_mfn_p[mididx] = virt_to_mfn(p2m);
return true; return true;
} }
...@@ -763,7 +730,7 @@ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) ...@@ -763,7 +730,7 @@ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
if (!early_alloc_p2m_middle(pfn)) if (!early_alloc_p2m_middle(pfn))
return false; return false;
if (early_can_reuse_p2m_middle(pfn, mfn)) if (early_can_reuse_p2m_middle(pfn))
return __set_phys_to_machine(pfn, mfn); return __set_phys_to_machine(pfn, mfn);
if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/)) if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/))
......
...@@ -595,6 +595,7 @@ char * __init xen_memory_setup(void) ...@@ -595,6 +595,7 @@ char * __init xen_memory_setup(void)
rc = 0; rc = 0;
} }
BUG_ON(rc); BUG_ON(rc);
BUG_ON(memmap.nr_entries == 0);
/* /*
* Xen won't allow a 1:1 mapping to be created to UNUSABLE * Xen won't allow a 1:1 mapping to be created to UNUSABLE
......
...@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void) ...@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void)
cycle_t ret; cycle_t ret;
preempt_disable_notrace(); preempt_disable_notrace();
src = this_cpu_ptr(&xen_vcpu->time); src = &__this_cpu_read(xen_vcpu)->time;
ret = pvclock_clocksource_read(src); ret = pvclock_clocksource_read(src);
preempt_enable_notrace(); preempt_enable_notrace();
return ret; return ret;
......
...@@ -167,6 +167,9 @@ static struct page *balloon_next_page(struct page *page) ...@@ -167,6 +167,9 @@ static struct page *balloon_next_page(struct page *page)
static enum bp_state update_schedule(enum bp_state state) static enum bp_state update_schedule(enum bp_state state)
{ {
if (state == BP_ECANCELED)
return BP_ECANCELED;
if (state == BP_DONE) { if (state == BP_DONE) {
balloon_stats.schedule_delay = 1; balloon_stats.schedule_delay = 1;
balloon_stats.retry_count = 1; balloon_stats.retry_count = 1;
......
...@@ -41,24 +41,29 @@ static int xen_add_device(struct device *dev) ...@@ -41,24 +41,29 @@ static int xen_add_device(struct device *dev)
#endif #endif
if (pci_seg_supported) { if (pci_seg_supported) {
struct physdev_pci_device_add add = { struct {
.seg = pci_domain_nr(pci_dev->bus), struct physdev_pci_device_add add;
.bus = pci_dev->bus->number, uint32_t pxm;
.devfn = pci_dev->devfn } add_ext = {
.add.seg = pci_domain_nr(pci_dev->bus),
.add.bus = pci_dev->bus->number,
.add.devfn = pci_dev->devfn
}; };
struct physdev_pci_device_add *add = &add_ext.add;
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
acpi_handle handle; acpi_handle handle;
#endif #endif
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
if (pci_dev->is_virtfn) { if (pci_dev->is_virtfn) {
add.flags = XEN_PCI_DEV_VIRTFN; add->flags = XEN_PCI_DEV_VIRTFN;
add.physfn.bus = physfn->bus->number; add->physfn.bus = physfn->bus->number;
add.physfn.devfn = physfn->devfn; add->physfn.devfn = physfn->devfn;
} else } else
#endif #endif
if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
add.flags = XEN_PCI_DEV_EXTFN; add->flags = XEN_PCI_DEV_EXTFN;
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
handle = ACPI_HANDLE(&pci_dev->dev); handle = ACPI_HANDLE(&pci_dev->dev);
...@@ -77,8 +82,8 @@ static int xen_add_device(struct device *dev) ...@@ -77,8 +82,8 @@ static int xen_add_device(struct device *dev)
status = acpi_evaluate_integer(handle, "_PXM", status = acpi_evaluate_integer(handle, "_PXM",
NULL, &pxm); NULL, &pxm);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
add.optarr[0] = pxm; add->optarr[0] = pxm;
add.flags |= XEN_PCI_DEV_PXM; add->flags |= XEN_PCI_DEV_PXM;
break; break;
} }
status = acpi_get_parent(handle, &handle); status = acpi_get_parent(handle, &handle);
...@@ -86,7 +91,7 @@ static int xen_add_device(struct device *dev) ...@@ -86,7 +91,7 @@ static int xen_add_device(struct device *dev)
} }
#endif /* CONFIG_ACPI */ #endif /* CONFIG_ACPI */
r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add); r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
if (r != -ENOSYS) if (r != -ENOSYS)
return r; return r;
pci_seg_supported = false; pci_seg_supported = false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment