Commit 14164b46 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.14-rc0-late-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bugfixes from Konrad Rzeszutek Wilk:
 "Bug-fixes for the new features that were added during this cycle.

  There are also two fixes for long-standing issues for which we now
  have a solution: the grant-table operations were doing extra work
  that was not needed, causing performance issues, and the self-balloon
  code was too aggressive, causing OOMs.

  Details:
   - Xen ARM couldn't use the new FIFO events
   - Xen ARM couldn't use the SWIOTLB if compiled as 32-bit with 64-bit PCIe devices.
   - The grant table code was doing needless M2P operations.
   - Ratchet down the self-balloon code so it won't OOM.
   - Fix misplaced kfree in Xen PVH error code paths"

* tag 'stable/for-linus-3.14-rc0-late-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pvh: Fix misplaced kfree from xlated_setup_gnttab_pages
  drivers: xen: deaggressive selfballoon driver
  xen/grant-table: Avoid m2p_override during mapping
  xen/gnttab: Use phys_addr_t to describe the grant frame base address
  xen: swiotlb: handle sizeof(dma_addr_t) != sizeof(phys_addr_t)
  arm/xen: Initialize event channels earlier
parents e2a0f813 f93576e1
arch/arm/Kconfig
@@ -1905,6 +1905,7 @@ config XEN
 	depends on !GENERIC_ATOMIC64
 	select ARM_PSCI
 	select SWIOTLB_XEN
+	select ARCH_DMA_ADDR_T_64BIT
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
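A note on this one-liner: ARCH_DMA_ADDR_T_64BIT makes dma_addr_t 64 bits wide even on a 32-bit ARM build, which is what lets the swiotlb-xen changes further down address 64-bit PCIe devices. In kernels of this vintage the typedef it controls (include/linux/types.h) is simply:

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;	/* bus addresses can exceed 4 GiB */
#else
typedef u32 dma_addr_t;	/* bus addresses limited to 32 bits */
#endif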
arch/arm/xen/enlighten.c
@@ -23,6 +23,7 @@
 #include <linux/of_address.h>
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
+#include <linux/cpu.h>
 #include <linux/mm.h>
@@ -154,7 +155,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static void __init xen_percpu_init(void *unused)
+static void xen_percpu_init(void)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
@@ -193,6 +194,31 @@ static void xen_power_off(void)
 	BUG();
 }
 
+static int xen_cpu_notification(struct notifier_block *self,
+				unsigned long action,
+				void *hcpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+		xen_percpu_init();
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block xen_cpu_notifier = {
+	.notifier_call = xen_cpu_notification,
+};
+
+static irqreturn_t xen_arm_callback(int irq, void *arg)
+{
+	xen_hvm_evtchn_do_upcall();
+	return IRQ_HANDLED;
+}
+
 /*
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
@@ -208,7 +234,7 @@ static int __init xen_guest_init(void)
 	const char *version = NULL;
 	const char *xen_prefix = "xen,xen-";
 	struct resource res;
-	unsigned long grant_frames;
+	phys_addr_t grant_frames;
 
 	node = of_find_compatible_node(NULL, NULL, "xen,xen");
 	if (!node) {
@@ -227,8 +253,12 @@ static int __init xen_guest_init(void)
 		return 0;
 	grant_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
-	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, (grant_frames >> PAGE_SHIFT));
+	pr_info("Xen %s support found, events_irq=%d gnttab_frame=%pa\n",
+		version, xen_events_irq, &grant_frames);
+
+	if (xen_events_irq < 0)
+		return -ENODEV;
+
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
@@ -281,9 +311,21 @@ static int __init xen_guest_init(void)
 	disable_cpuidle();
 	disable_cpufreq();
 
+	xen_init_IRQ();
+
+	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
+			       "events", &xen_vcpu)) {
+		pr_err("Error requesting IRQ %d\n", xen_events_irq);
+		return -EINVAL;
+	}
+
+	xen_percpu_init();
+
+	register_cpu_notifier(&xen_cpu_notifier);
+
 	return 0;
 }
-core_initcall(xen_guest_init);
+early_initcall(xen_guest_init);
 
 static int __init xen_pm_init(void)
 {
@@ -297,31 +339,6 @@ static int __init xen_pm_init(void)
 }
 late_initcall(xen_pm_init);
 
-static irqreturn_t xen_arm_callback(int irq, void *arg)
-{
-	xen_hvm_evtchn_do_upcall();
-	return IRQ_HANDLED;
-}
-
-static int __init xen_init_events(void)
-{
-	if (!xen_domain() || xen_events_irq < 0)
-		return -ENODEV;
-
-	xen_init_IRQ();
-
-	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
-			       "events", &xen_vcpu)) {
-		pr_err("Error requesting IRQ %d\n", xen_events_irq);
-		return -EINVAL;
-	}
-
-	on_each_cpu(xen_percpu_init, NULL, 0);
-
-	return 0;
-}
-postcore_initcall(xen_init_events);
-
 /* In the hypervisor.S file. */
 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
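Why the initcall level moves from core_initcall to early_initcall: early initcalls run on the boot CPU before the secondary CPUs are brought online, so the percpu event IRQ is in place before anything else needs event channels, and each secondary CPU is then set up from the CPU_STARTING notifier instead of via on_each_cpu(). A rough sketch of the boot ordering this relies on (paraphrasing init/main.c of this era):

/*
 * kernel_init_freeable():
 *   do_pre_smp_initcalls()  -- early_initcall(): xen_guest_init() now runs
 *                              here, boot CPU only; xen_percpu_init() is
 *                              called directly for that CPU
 *   smp_init()              -- secondary CPUs come up; each one delivers
 *                              CPU_STARTING, so xen_cpu_notification()
 *                              runs xen_percpu_init() on it
 *   do_basic_setup()        -- core_initcall()/postcore_initcall(), the
 *                              previous homes of this setup, only run here
 */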
arch/x86/include/asm/xen/page.h
@@ -52,7 +52,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 extern int m2p_add_override(unsigned long mfn, struct page *page,
 			    struct gnttab_map_grant_ref *kmap_op);
 extern int m2p_remove_override(struct page *page,
-			       struct gnttab_map_grant_ref *kmap_op);
+			       struct gnttab_map_grant_ref *kmap_op,
+			       unsigned long mfn);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
arch/x86/xen/grant-table.c
@@ -162,14 +162,15 @@ static int __init xlated_setup_gnttab_pages(void)
 	rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
 				    &xen_auto_xlat_grant_frames.vaddr);
 
-	kfree(pages);
 	if (rc) {
 		pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
 			nr_grant_frames, rc);
 		free_xenballooned_pages(nr_grant_frames, pages);
+		kfree(pages);
 		kfree(pfns);
 		return rc;
 	}
+	kfree(pages);
 
 	xen_auto_xlat_grant_frames.pfn = pfns;
 	xen_auto_xlat_grant_frames.count = nr_grant_frames;
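The fix above is a classic free-before-last-use bug: kfree(pages) originally ran before the error path, which still handed pages to free_xenballooned_pages(). A minimal userspace sketch of the same control-flow shape, with hypothetical stand-ins (do_map, release_pages) for the mapping and ballooning calls:

#include <stdio.h>
#include <stdlib.h>

static int do_map(void **pages, int n) { (void)pages; return n ? -1 : 0; }

static void release_pages(int n, void **pages)
{
	for (int i = 0; i < n; i++)
		pages[i] = NULL;	/* dereferences the array */
}

static int setup(int n)
{
	void **pages = calloc(n, sizeof(*pages));
	int rc = do_map(pages, n);

	/* Buggy shape: freeing 'pages' here, before the error path below,
	 * makes release_pages() touch freed memory. The fix is to free
	 * only after the last use on every path, as the patch does. */
	if (rc) {
		release_pages(n, pages);	/* still needs 'pages' */
		free(pages);
		return rc;
	}
	free(pages);
	return 0;
}

int main(void) { return setup(4) ? 1 : 0; }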
arch/x86/xen/p2m.c
@@ -899,13 +899,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 				"m2p_add_override: pfn %lx not mapped", pfn))
 			return -EINVAL;
 	}
-	WARN_ON(PagePrivate(page));
-	SetPagePrivate(page);
-	set_page_private(page, mfn);
-	page->index = pfn_to_mfn(pfn);
-
-	if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
-		return -ENOMEM;
 
 	if (kmap_op != NULL) {
 		if (!PageHighMem(page)) {
@@ -944,19 +937,16 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
 int m2p_remove_override(struct page *page,
-			struct gnttab_map_grant_ref *kmap_op)
+			struct gnttab_map_grant_ref *kmap_op,
+			unsigned long mfn)
 {
 	unsigned long flags;
-	unsigned long mfn;
 	unsigned long pfn;
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
 
 	pfn = page_to_pfn(page);
-	mfn = get_phys_to_machine(pfn);
-	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
-		return -EINVAL;
 
 	if (!PageHighMem(page)) {
 		address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -970,10 +960,7 @@ int m2p_remove_override(struct page *page,
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_del(&page->lru);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
-	WARN_ON(!PagePrivate(page));
-	ClearPagePrivate(page);
-	set_phys_to_machine(pfn, page->index);
 
 	if (kmap_op != NULL) {
 		if (!PageHighMem(page)) {
 			struct multicall_space mcs;
drivers/block/xen-blkback/blkback.c
@@ -285,8 +285,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 			!rb_next(&persistent_gnt->node)) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -321,8 +320,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -330,7 +328,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
+		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 		BUG_ON(ret);
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
@@ -670,15 +668,14 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 			    GNTMAP_host_map, pages[i]->handle);
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
-						invcount);
+			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
 			BUG_ON(ret);
 			put_free_pages(blkif, unmap_pages, invcount);
 			invcount = 0;
 		}
 	}
 	if (invcount) {
-		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
 		BUG_ON(ret);
 		put_free_pages(blkif, unmap_pages, invcount);
 	}
@@ -740,7 +737,7 @@ static int xen_blkbk_map(struct xen_blkif *blkif,
 	}
 
 	if (segs_to_map) {
-		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
 		BUG_ON(ret);
 	}
drivers/xen/gntdev.c
@@ -284,8 +284,10 @@ static int map_grant_pages(struct grant_map *map)
 	}
 
 	pr_debug("map %d+%d\n", map->index, map->count);
-	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
-			map->pages, map->count);
+	err = gnttab_map_refs_userspace(map->map_ops,
+					use_ptemod ? map->kmap_ops : NULL,
+					map->pages,
+					map->count);
 	if (err)
 		return err;
@@ -315,8 +317,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 		}
 	}
 
-	err = gnttab_unmap_refs(map->unmap_ops + offset,
-			use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
-			pages);
+	err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
+					  use_ptemod ? map->kmap_ops + offset : NULL,
+					  map->pages + offset,
+					  pages);
 	if (err)
 		return err;
drivers/xen/grant-table.c
@@ -837,7 +837,7 @@ unsigned int gnttab_max_grant_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
-int gnttab_setup_auto_xlat_frames(unsigned long addr)
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 {
 	xen_pfn_t *pfn;
 	unsigned int max_nr_gframes = __max_nr_grant_frames();
@@ -849,8 +849,8 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
 	if (vaddr == NULL) {
-		pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
-			addr);
+		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
+			&addr);
 		return -ENOMEM;
 	}
 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
@@ -928,15 +928,17 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 }
 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
 
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-		    struct gnttab_map_grant_ref *kmap_ops,
-		    struct page **pages, unsigned int count)
+int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		      struct gnttab_map_grant_ref *kmap_ops,
+		      struct page **pages, unsigned int count,
+		      bool m2p_override)
 {
 	int i, ret;
 	bool lazy = false;
 	pte_t *pte;
-	unsigned long mfn;
+	unsigned long mfn, pfn;
+
+	BUG_ON(kmap_ops && !m2p_override);
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
 	if (ret)
 		return ret;
@@ -955,10 +957,12 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
 					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
 		}
-		return ret;
+		return 0;
 	}
 
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+	if (m2p_override &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
 		lazy = true;
 	}
@@ -975,6 +979,18 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		} else {
 			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
 		}
-		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
-		       &kmap_ops[i] : NULL);
+		pfn = page_to_pfn(pages[i]);
+
+		WARN_ON(PagePrivate(pages[i]));
+		SetPagePrivate(pages[i]);
+		set_page_private(pages[i], mfn);
+
+		pages[i]->index = pfn_to_mfn(pfn);
+		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (m2p_override)
+			ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+					       &kmap_ops[i] : NULL);
 		if (ret)
 			goto out;
@@ -987,15 +1003,32 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 	return ret;
 }
+
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count)
+{
+	return __gnttab_map_refs(map_ops, NULL, pages, count, false);
+}
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
+int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count)
+{
+	return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
+}
+EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
+
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-		      struct gnttab_map_grant_ref *kmap_ops,
-		      struct page **pages, unsigned int count)
+int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+			struct gnttab_map_grant_ref *kmap_ops,
+			struct page **pages, unsigned int count,
+			bool m2p_override)
 {
 	int i, ret;
 	bool lazy = false;
+	unsigned long pfn, mfn;
+
+	BUG_ON(kmap_ops && !m2p_override);
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 	if (ret)
 		return ret;
@@ -1006,17 +1039,33 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
 					INVALID_P2M_ENTRY);
 		}
-		return ret;
+		return 0;
 	}
 
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+	if (m2p_override &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
 		lazy = true;
 	}
 
 	for (i = 0; i < count; i++) {
-		ret = m2p_remove_override(pages[i], kmap_ops ?
-				&kmap_ops[i] : NULL);
+		pfn = page_to_pfn(pages[i]);
+		mfn = get_phys_to_machine(pfn);
+		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		set_page_private(pages[i], INVALID_P2M_ENTRY);
+		WARN_ON(!PagePrivate(pages[i]));
+		ClearPagePrivate(pages[i]);
+		set_phys_to_machine(pfn, pages[i]->index);
+
+		if (m2p_override)
+			ret = m2p_remove_override(pages[i],
+						  kmap_ops ?
+						  &kmap_ops[i] : NULL,
+						  mfn);
+
 		if (ret)
 			goto out;
 	}
@@ -1027,8 +1076,22 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 	return ret;
 }
+
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
+		      struct page **pages, unsigned int count)
+{
+	return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
+}
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
+				struct gnttab_map_grant_ref *kmap_ops,
+				struct page **pages, unsigned int count)
+{
+	return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
+
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
 	BUG_ON(grefs_per_grant_frame == 0);
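Taken together, the refactor leaves two call patterns; a sketch with arguments abbreviated and error handling elided: in-kernel backends such as blkback use the plain variants, which now skip the m2p override entirely (the needless M2P work the merge message mentions), while gntdev keeps the override for its userspace-visible mappings via the _userspace variants.

/* In-kernel mappings (e.g. blkback): no m2p override bookkeeping. */
ret = gnttab_map_refs(map_ops, pages, count);
ret = gnttab_unmap_refs(unmap_ops, pages, count);

/* Userspace-visible mappings (gntdev): keep the m2p override so
 * machine-to-physical lookups still work for these pages. */
ret = gnttab_map_refs_userspace(map_ops, kmap_ops, pages, count);
ret = gnttab_unmap_refs_userspace(unmap_ops, kmap_ops, pages, count);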
drivers/xen/swiotlb-xen.c
@@ -75,14 +75,32 @@ static unsigned long xen_io_tlb_nslabs;
 
 static u64 start_dma_addr;
 
+/*
+ * Both of these functions should avoid PFN_PHYS because phys_addr_t
+ * can be 32-bit when dma_addr_t is 64-bit, leading to a loss of
+ * information if the shift is done before the cast to 64-bit.
+ */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-	return phys_to_machine(XPADDR(paddr)).maddr;
+	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
+	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+
+	dma |= paddr & ~PAGE_MASK;
+
+	return dma;
 }
 
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-	return machine_to_phys(XMADDR(baddr)).paddr;
+	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
+	phys_addr_t paddr = dma;
+
+	BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
+
+	paddr |= baddr & ~PAGE_MASK;
+
+	return paddr;
 }
 
 static inline dma_addr_t xen_virt_to_bus(void *address)
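The comment in this hunk is the heart of the 32/64-bit fix: if phys_addr_t (and unsigned long) is 32 bits wide while dma_addr_t is 64, shifting before casting silently drops the high bits of the bus address. A standalone demonstration, assuming PAGE_SHIFT is 12 and a machine frame that sits above 4 GiB:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t mfn = 0x123456;	/* frame address 0x123456000, > 4 GiB */

	/* Shift performed in 32 bits, then widened: high bits already lost. */
	uint64_t wrong = (uint64_t)(mfn << PAGE_SHIFT);
	/* Cast first, then shift in 64 bits: value preserved. */
	uint64_t right = (uint64_t)mfn << PAGE_SHIFT;

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);	/* 0x23456000 */
	printf("right: 0x%llx\n", (unsigned long long)right);	/* 0x123456000 */
	return 0;
}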
drivers/xen/xen-selfballoon.c
@@ -175,6 +175,7 @@ static void frontswap_selfshrink(void)
 #endif /* CONFIG_FRONTSWAP */
 
 #define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
+#define PAGES2MB(pages)	((pages) >> (20 - PAGE_SHIFT))
 
 /*
  * Use current balloon size, the goal (vm_committed_as), and hysteresis
@@ -525,6 +526,7 @@ EXPORT_SYMBOL(register_xen_selfballooning);
 int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
 {
 	bool enable = false;
+	unsigned long reserve_pages;
 
 	if (!xen_domain())
 		return -ENODEV;
@@ -549,6 +551,26 @@ int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
 	if (!enable)
 		return -ENODEV;
 
+	/*
+	 * Give selfballoon_reserved_mb a default value (10% of total RAM
+	 * pages) to make selfballooning less aggressive.
+	 *
+	 * There are mainly two reasons:
+	 * 1) The original goal_page didn't consider some pages used by
+	 *    kernel space, like slab pages and memory used by device drivers.
+	 *
+	 * 2) The balloon driver may not give memory back to the guest OS
+	 *    fast enough when the workload suddenly acquires a lot of
+	 *    physical memory.
+	 *
+	 * In both cases, the guest OS will suffer from memory pressure and
+	 * the OOM killer may be triggered.
+	 * By reserving an extra 10% of total RAM pages, the system stays
+	 * much more reliable and responds faster in some cases.
+	 */
+	if (!selfballoon_reserved_mb) {
+		reserve_pages = totalram_pages / 10;
+		selfballoon_reserved_mb = PAGES2MB(reserve_pages);
+	}
 	schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
 
 	return 0;
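A worked example of the new default reservation, assuming 4 KiB pages (PAGE_SHIFT = 12, so PAGES2MB(p) is p >> 8): a guest with 2 GiB of RAM has 524288 pages, reserves a tenth of them (52428 pages), and PAGES2MB turns that into roughly 204 MB:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */
#define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
#define PAGES2MB(pages)	((pages) >> (20 - PAGE_SHIFT))

int main(void)
{
	unsigned long totalram_pages = 2UL << (30 - PAGE_SHIFT);	/* 2 GiB */
	unsigned long reserve_pages = totalram_pages / 10;		/* 52428 */

	printf("reserve %lu pages = %lu MB\n",
	       reserve_pages, PAGES2MB(reserve_pages));	/* ~204 MB */
	return 0;
}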
include/xen/grant_table.h
@@ -185,15 +185,19 @@ struct grant_frames {
 };
 extern struct grant_frames xen_auto_xlat_grant_frames;
 unsigned int gnttab_max_grant_frames(void);
-int gnttab_setup_auto_xlat_frames(unsigned long addr);
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
 void gnttab_free_auto_xlat_frames(void);
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-		    struct gnttab_map_grant_ref *kmap_ops,
 		    struct page **pages, unsigned int count);
+int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-		      struct gnttab_map_grant_ref *kunmap_ops,
 		      struct page **pages, unsigned int count);
+int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
+				struct gnttab_map_grant_ref *kunmap_ops,
+				struct page **pages, unsigned int count);