Commit 9351f138 authored by Linus Torvalds

Merge tag 'for-linus-6.10a-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - a small cleanup in the drivers/xen/xenbus Makefile

 - a fix of the Xen xenstore driver to improve connecting to a
   late-started Xenstore

 - an enhancement for better support of ballooning in PVH guests

 - a cleanup using try_cmpxchg() instead of open coding it

* tag 'for-linus-6.10a-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  drivers/xen: Improve the late XenStore init protocol
  xen/xenbus: Use *-y instead of *-objs in Makefile
  xen/x86: add extra pages to unpopulated-alloc if available
  locking/x86/xen: Use try_cmpxchg() in xen_alloc_p2m_entry()
parents 02c438bb a3607581
@@ -379,3 +379,36 @@ void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
 
 	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
 }
+
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+	unsigned int i;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	/* Must be set strictly before calling xen_free_unpopulated_pages(). */
+	*res = &iomem_resource;
+
+	/*
+	 * Initialize with pages from the extra memory regions (see
+	 * arch/x86/xen/setup.c).
+	 */
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		unsigned int j;
+
+		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+			struct page *pg =
+				pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+			xen_free_unpopulated_pages(1, &pg);
+		}
+
+		/* Zero so region is not also added to the balloon driver. */
+		xen_extra_mem[i].n_pfns = 0;
+	}
+
+	return 0;
+}
+#endif
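
The hunk above corresponds to the PVH ballooning enhancement from the summary: at init time the extra memory regions are fed into the CONFIG_XEN_UNPOPULATED_ALLOC pool and their page counts zeroed so the balloon driver does not claim the same ranges. For orientation, a minimal kernel-style sketch of how a driver draws from that pool, assuming only the existing xen_alloc_unpopulated_pages()/xen_free_unpopulated_pages() interface; example_borrow_one_page() is a hypothetical helper, not code from this series.

#include <linux/errno.h>
#include <linux/mm.h>
#include <xen/xen.h>	/* xen_alloc_unpopulated_pages(), xen_free_unpopulated_pages() */

/*
 * Hypothetical consumer of the pool seeded above: borrow one unpopulated
 * page (for example as a backing slot for mapping a foreign frame) and
 * hand it back when done.
 */
static int example_borrow_one_page(void)
{
	struct page *page;
	int ret;

	ret = xen_alloc_unpopulated_pages(1, &page);
	if (ret)
		return ret;	/* pool exhausted or not configured */

	/* ... map a foreign frame over page_to_pfn(page) here ... */

	xen_free_unpopulated_pages(1, &page);
	return 0;
}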
@@ -555,7 +555,6 @@ int xen_alloc_p2m_entry(unsigned long pfn)
 			/* Separately check the mid mfn level */
 			unsigned long missing_mfn;
 			unsigned long mid_mfn_mfn;
-			unsigned long old_mfn;
 
 			mid_mfn = alloc_p2m_page();
 			if (!mid_mfn)
@@ -565,12 +564,12 @@ int xen_alloc_p2m_entry(unsigned long pfn)
 
 			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
 			mid_mfn_mfn = virt_to_mfn(mid_mfn);
-			old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
-			if (old_mfn != missing_mfn) {
-				free_p2m_page(mid_mfn);
-				mid_mfn = mfn_to_virt(old_mfn);
-			} else {
+			/* try_cmpxchg() updates missing_mfn on failure. */
+			if (try_cmpxchg(top_mfn_p, &missing_mfn, mid_mfn_mfn)) {
 				p2m_top_mfn_p[topidx] = mid_mfn;
+			} else {
+				free_p2m_page(mid_mfn);
+				mid_mfn = mfn_to_virt(missing_mfn);
 			}
 		}
 	} else {
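
The conversion above works because try_cmpxchg() returns a boolean and, on failure, writes the value it actually observed back through its second argument, which is why the separate old_mfn temporary can be dropped. Below is a stand-alone sketch of that shape using C11's atomic_compare_exchange_strong(), which has the same contract, so it builds in user space; the names and values are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Open-coded pattern the old code used:
 *     old = cmpxchg(ptr, expected, new);
 *     if (old != expected) { ...consume old... }
 *
 * try_cmpxchg(ptr, &expected, new) folds the comparison in and updates
 * "expected" on failure; atomic_compare_exchange_strong() behaves the
 * same way for this purpose.
 */
static bool install_if_missing(_Atomic unsigned long *slot,
			       unsigned long *expected, unsigned long newval)
{
	return atomic_compare_exchange_strong(slot, expected, newval);
}

int main(void)
{
	_Atomic unsigned long slot = 0;		/* 0 stands in for "missing" */
	unsigned long expected = 0;

	if (install_if_missing(&slot, &expected, 42))
		printf("installed 42\n");

	expected = 0;				/* this caller loses the race */
	if (!install_if_missing(&slot, &expected, 99))
		printf("already populated with %lu\n", expected);

	return 0;
}

Built with -std=c11, the second call prints the value installed by the first, mirroring how the p2m code reuses missing_mfn after a lost race instead of consuming a separate return value.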
 # SPDX-License-Identifier: GPL-2.0
 obj-y += xenbus.o
-obj-y += xenbus_dev_frontend.o
 
-xenbus-objs =
-xenbus-objs += xenbus_client.o
-xenbus-objs += xenbus_comms.o
-xenbus-objs += xenbus_xs.o
-xenbus-objs += xenbus_probe.o
+xenbus-y := xenbus_client.o
+xenbus-y += xenbus_comms.o
+xenbus-y += xenbus_xs.o
+xenbus-y += xenbus_probe.o
 
-xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
-xenbus-objs += $(xenbus-be-objs-y)
+xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
 
+obj-y += xenbus_dev_frontend.o
 obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o
 obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
@@ -65,13 +65,17 @@
 
 #include "xenbus.h"
 
-static int xs_init_irq;
+static int xs_init_irq = -1;
 int xen_store_evtchn;
 EXPORT_SYMBOL_GPL(xen_store_evtchn);
 
 struct xenstore_domain_interface *xen_store_interface;
 EXPORT_SYMBOL_GPL(xen_store_interface);
 
+#define XS_INTERFACE_READY \
+	((xen_store_interface != NULL) && \
+	 (xen_store_interface->connection == XENSTORE_CONNECTED))
+
 enum xenstore_init xen_store_domain_type;
 EXPORT_SYMBOL_GPL(xen_store_domain_type);
 
@@ -751,7 +755,7 @@ static void xenbus_probe(void)
 {
 	xenstored_ready = 1;
 
-	if (!xen_store_interface) {
+	if (!xen_store_interface)
 		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
 					       XEN_PAGE_SIZE, MEMREMAP_WB);
 	/*
@@ -762,8 +766,8 @@ static void xenbus_probe(void)
 	 * being called and the event channel not being enabled again
 	 * afterwards, resulting in missed event notifications.
 	 */
+	if (xs_init_irq >= 0)
 		free_irq(xs_init_irq, &xb_waitq);
-	}
 
 	/*
 	 * In the HVM case, xenbus_init() deferred its call to
@@ -822,7 +826,7 @@ static int __init xenbus_probe_initcall(void)
 	if (xen_store_domain_type == XS_PV ||
 	    (xen_store_domain_type == XS_HVM &&
 	     !xs_hvm_defer_init_for_callback() &&
-	     xen_store_interface != NULL))
+	     XS_INTERFACE_READY))
 		xenbus_probe();
 
 	/*
@@ -831,7 +835,7 @@ static int __init xenbus_probe_initcall(void)
 	 * started, then probe. It will be triggered when communication
 	 * starts happening, by waiting on xb_waitq.
 	 */
-	if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
+	if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
 		struct task_struct *probe_task;
 
 		probe_task = kthread_run(xenbus_probe_thread, NULL,
@@ -1014,6 +1018,12 @@ static int __init xenbus_init(void)
 		xen_store_interface =
 			memremap(xen_store_gfn << XEN_PAGE_SHIFT,
 				 XEN_PAGE_SIZE, MEMREMAP_WB);
+		if (!xen_store_interface) {
+			pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
+			       __func__, v);
+			err = -EINVAL;
+			goto out_error;
+		}
 		if (xen_store_interface->connection != XENSTORE_CONNECTED)
 			wait = true;
 	}
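
The common thread in the xenbus hunks above is that a mapped XenStore ring is no longer treated as proof that the store is usable: xs_init_irq starts at -1 so free_irq() only runs when the late-init IRQ was actually requested, the memremap() result is now checked, and XS_INTERFACE_READY additionally requires connection == XENSTORE_CONNECTED. Below is a small user-space sketch of that readiness predicate; the struct here is a deliberately trimmed stand-in for xenstore_domain_interface (the real layout lives in Xen's public io/xs_wire.h header).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Connection states mirroring Xen's io/xs_wire.h. */
#define XENSTORE_CONNECTED 0	/* the steady state */
#define XENSTORE_RECONNECT 1	/* guest has initiated a reconnect */

/* Trimmed stand-in: only the field the readiness check looks at. */
struct xenstore_domain_interface {
	/* request/response rings and indices omitted */
	uint32_t connection;
};

/*
 * Equivalent of the XS_INTERFACE_READY macro added above: the interface
 * must be mapped and the store must have finished (re)connecting before
 * the probe path is allowed to run.
 */
static bool xs_interface_ready(const struct xenstore_domain_interface *intf)
{
	return intf != NULL && intf->connection == XENSTORE_CONNECTED;
}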