Commit 9ad95bda authored by Linus Torvalds

Merge tag 'for-linus-4.15-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "This contains two fixes for running under Xen:

   - a fix avoiding resource conflicts between adding mmio areas and
     memory hotplug

   - a fix setting NX bits in page table entries copied from Xen when
     running a PV guest"

* tag 'for-linus-4.15-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/balloon: Mark unallocated host memory as UNUSABLE
  x86-64/Xen: eliminate W+X mappings
parents fca0e39b b3cf8528
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
#include <linux/bootmem.h>
#endif
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
...@@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num) ...@@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num)
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
/*
 * arch_xen_balloon_init - describe host RAM not allocated to dom0 so it can
 * be used for hotplug-based ballooning (dom0 only).
 *
 * @hostmem_resource: resource filled in with the range between the end of
 * dom0's own memory (max_pfn) and the end of the last host RAM region.
 * Non-RAM holes inside that range are inserted as child resources named
 * "Unavailable host RAM" so they are never handed out for hotplug.
 *
 * Reads the host (machine) e820 map via the XENMEM_machine_memory_map
 * hypercall. On any failure the function simply returns, leaving
 * @hostmem_resource untouched (ballooning then falls back to the normal
 * iomem allocation path).
 */
void __init arch_xen_balloon_init(struct resource *hostmem_resource)
{
struct xen_memory_map memmap;
int rc;
unsigned int i, last_guest_ram;
phys_addr_t max_addr = PFN_PHYS(max_pfn);
struct e820_table *xen_e820_table;
const struct e820_entry *entry;
struct resource *res;

/* Only the initial domain may read the host memory map. */
if (!xen_initial_domain())
return;

/* e820_table is sizeable; allocate it rather than placing it on the stack. */
xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
if (!xen_e820_table)
return;

memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
if (rc) {
pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
goto out;
}

/* Find the last host RAM entry that overlaps dom0's own memory. */
last_guest_ram = 0;
for (i = 0; i < memmap.nr_entries; i++) {
if (xen_e820_table->entries[i].addr >= max_addr)
break;
if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
last_guest_ram = i;
}

entry = &xen_e820_table->entries[last_guest_ram];
if (max_addr >= entry->addr + entry->size)
goto out; /* No unallocated host RAM. */

hostmem_resource->start = max_addr;
hostmem_resource->end = entry->addr + entry->size;

/*
 * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
 * as unavailable. The rest of that region can be used for hotplug-based
 * ballooning.
 *
 * NOTE(review): 'i' deliberately carries over from the loop above, so
 * the scan resumes at the first entry at or beyond max_addr.
 */
for (; i < memmap.nr_entries; i++) {
entry = &xen_e820_table->entries[i];

if (entry->type == E820_TYPE_RAM)
continue;

if (entry->addr >= hostmem_resource->end)
break;

res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
goto out;

res->name = "Unavailable host RAM";
res->start = entry->addr;
/*
 * Clamp the hole to the end of host RAM. NOTE(review): the
 * "[%llx - %llx)" message below suggests a half-open range,
 * though struct resource ends are conventionally inclusive —
 * verify against the resource-tree users.
 */
res->end = (entry->addr + entry->size < hostmem_resource->end) ?
entry->addr + entry->size : hostmem_resource->end;

rc = insert_resource(hostmem_resource, res);
if (rc) {
pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
__func__, res->start, res->end, rc);
kfree(res);
goto out;
}
}

out:
/* Single exit: the e820 copy is only needed while building the resource. */
kfree(xen_e820_table);
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
...@@ -88,6 +88,8 @@ ...@@ -88,6 +88,8 @@
#include "multicalls.h" #include "multicalls.h"
#include "pmu.h" #include "pmu.h"
#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
void *xen_initial_gdt; void *xen_initial_gdt;
static int xen_cpu_up_prepare_pv(unsigned int cpu); static int xen_cpu_up_prepare_pv(unsigned int cpu);
...@@ -1258,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void) ...@@ -1258,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
__userpte_alloc_gfp &= ~__GFP_HIGHMEM; __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
/* Work out if we support NX */ /* Work out if we support NX */
get_cpu_cap(&boot_cpu_data);
x86_configure_nx(); x86_configure_nx();
/* Get mfn list */ /* Get mfn list */
......
...@@ -1902,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) ...@@ -1902,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Graft it onto L4[511][510] */ /* Graft it onto L4[511][510] */
copy_page(level2_kernel_pgt, l2); copy_page(level2_kernel_pgt, l2);
/*
* Zap execute permission from the ident map. Due to the sharing of
* L1 entries we need to do this in the L2.
*/
if (__supported_pte_mask & _PAGE_NX) {
for (i = 0; i < PTRS_PER_PMD; ++i) {
if (pmd_none(level2_ident_pgt[i]))
continue;
level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
}
}
/* Copy the initial P->M table mappings if necessary. */ /* Copy the initial P->M table mappings if necessary. */
i = pgd_index(xen_start_info->mfn_list); i = pgd_index(xen_start_info->mfn_list);
if (i && i < pgd_index(__START_KERNEL_map)) if (i && i < pgd_index(__START_KERNEL_map))
......
...@@ -808,7 +808,6 @@ char * __init xen_memory_setup(void) ...@@ -808,7 +808,6 @@ char * __init xen_memory_setup(void)
addr = xen_e820_table.entries[0].addr; addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size; size = xen_e820_table.entries[0].size;
while (i < xen_e820_table.nr_entries) { while (i < xen_e820_table.nr_entries) {
bool discard = false;
chunk_size = size; chunk_size = size;
type = xen_e820_table.entries[i].type; type = xen_e820_table.entries[i].type;
...@@ -824,11 +823,10 @@ char * __init xen_memory_setup(void) ...@@ -824,11 +823,10 @@ char * __init xen_memory_setup(void)
xen_add_extra_mem(pfn_s, n_pfns); xen_add_extra_mem(pfn_s, n_pfns);
xen_max_p2m_pfn = pfn_s + n_pfns; xen_max_p2m_pfn = pfn_s + n_pfns;
} else } else
discard = true; type = E820_TYPE_UNUSABLE;
} }
if (!discard) xen_align_and_add_e820_region(addr, chunk_size, type);
xen_align_and_add_e820_region(addr, chunk_size, type);
addr += chunk_size; addr += chunk_size;
size -= chunk_size; size -= chunk_size;
......
...@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource) ...@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
kfree(resource); kfree(resource);
} }
/*
 * Host memory not allocated to dom0. We can use this range for hotplug-based
 * ballooning.
 *
 * It's a type-less resource. Setting IORESOURCE_MEM would make resource
 * management algorithms (arch_remove_reservations()) look into the guest
 * e820, which we don't want.
 */
static struct resource hostmem_resource = {
.name = "Host RAM",
};

/*
 * Weak no-op default: architectures that can enumerate unallocated host
 * memory (see arch_xen_balloon_init() elsewhere in this tree) override this
 * to populate @res with that range.
 */
void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
{}
static struct resource *additional_memory_resource(phys_addr_t size) static struct resource *additional_memory_resource(phys_addr_t size)
{ {
struct resource *res; struct resource *res, *res_hostmem;
int ret; int ret = -ENOMEM;
res = kzalloc(sizeof(*res), GFP_KERNEL); res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) if (!res)
...@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size) ...@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
res->name = "System RAM"; res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
ret = allocate_resource(&iomem_resource, res, res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
size, 0, -1, if (res_hostmem) {
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); /* Try to grab a range from hostmem */
if (ret < 0) { res_hostmem->name = "Host memory";
pr_err("Cannot allocate new System RAM resource\n"); ret = allocate_resource(&hostmem_resource, res_hostmem,
kfree(res); size, 0, -1,
return NULL; PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
}
if (!ret) {
/*
* Insert this resource into iomem. Because hostmem_resource
 * tracks the portion of guest e820 marked as UNUSABLE, no one else
* should try to use it.
*/
res->start = res_hostmem->start;
res->end = res_hostmem->end;
ret = insert_resource(&iomem_resource, res);
if (ret < 0) {
pr_err("Can't insert iomem_resource [%llx - %llx]\n",
res->start, res->end);
release_memory_resource(res_hostmem);
res_hostmem = NULL;
res->start = res->end = 0;
}
}
if (ret) {
ret = allocate_resource(&iomem_resource, res,
size, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new System RAM resource\n");
kfree(res);
return NULL;
}
} }
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
...@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size) ...@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit); pfn, limit);
release_memory_resource(res); release_memory_resource(res);
release_memory_resource(res_hostmem);
return NULL; return NULL;
} }
} }
...@@ -765,6 +810,8 @@ static int __init balloon_init(void) ...@@ -765,6 +810,8 @@ static int __init balloon_init(void)
set_online_page_callback(&xen_online_page); set_online_page_callback(&xen_online_page);
register_memory_notifier(&xen_memory_nb); register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root); register_sysctl_table(xen_root);
arch_xen_balloon_init(&hostmem_resource);
#endif #endif
#ifdef CONFIG_XEN_PV #ifdef CONFIG_XEN_PV
......
...@@ -44,3 +44,8 @@ static inline void xen_balloon_init(void) ...@@ -44,3 +44,8 @@ static inline void xen_balloon_init(void)
{ {
} }
#endif #endif
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
struct resource;	/* forward declaration; the full definition is not needed here */

/*
 * Optional arch hook: fill @hostmem_resource with the host memory range not
 * allocated to this domain, for use by hotplug-based ballooning. A weak
 * no-op default is provided by the balloon driver.
 */
void arch_xen_balloon_init(struct resource *hostmem_resource);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment