Commit 7ee2d2d6 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'stable/for-linus-3.17-b-rc4-tag' of...

Merge tag 'stable/for-linus-3.17-b-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bug fixes from David Vrabel:
 - fix for PVHVM suspend/resume and migration
 - don't pointlessly retry certain ballooning ops
 - fix gntalloc when grefs have run out.
 - fix PV boot if KASLR is enabled or very large modules are used.

* tag 'stable/for-linus-3.17-b-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  x86/xen: don't copy bogus duplicate entries into kernel page tables
  xen/gntalloc: safely delete grefs in add_grefs() undo path
  xen/gntalloc: fix oops after running out of grant refs
  xen/balloon: cancel ballooning if adding new memory failed
  xen/manage: Always freeze/thaw processes when suspend/resuming
parents 018cace2 0b5a5063
...@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512]; ...@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512]; extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512]; extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[]; extern pgd_t init_level4_pgt[];
#define swapper_pg_dir init_level4_pgt #define swapper_pg_dir init_level4_pgt
......
...@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, ...@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
* *
* We can construct this by grafting the Xen provided pagetable into * We can construct this by grafting the Xen provided pagetable into
* head_64.S's preconstructed pagetables. We copy the Xen L2's into * head_64.S's preconstructed pagetables. We copy the Xen L2's into
* level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This * level2_ident_pgt, and level2_kernel_pgt. This means that only the
* means that only the kernel has a physical mapping to start with - * kernel has a physical mapping to start with - but that's enough to
* but that's enough to get __va working. We need to fill in the rest * get __va working. We need to fill in the rest of the physical
* of the physical mapping once some sort of allocator has been set * mapping once some sort of allocator has been set up. NOTE: for
* up. * PVH, the page tables are native.
* NOTE: for PVH, the page tables are native.
*/ */
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{ {
...@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) ...@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_i[0] -> level2_ident_pgt */ /* L3_i[0] -> level2_ident_pgt */
convert_pfn_mfn(level3_ident_pgt); convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt /* L3_k[510] -> level2_kernel_pgt
* L3_i[511] -> level2_fixmap_pgt */ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt); convert_pfn_mfn(level3_kernel_pgt);
/* L3_k[511][506] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
} }
/* We get [511][511] and have Xen's version of level2_kernel_pgt */ /* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
...@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) ...@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
addr[1] = (unsigned long)l3; addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2; addr[2] = (unsigned long)l2;
/* Graft it onto L4[272][0]. Note that we creating an aliasing problem: /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
* Both L4[272][0] and L4[511][511] have entries that point to the same * Both L4[272][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space * L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just * it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you * modify the PMD table to point to other PTE's or none, then you
* are OK - which is what cleanup_highmap does) */ * are OK - which is what cleanup_highmap does) */
copy_page(level2_ident_pgt, l2); copy_page(level2_ident_pgt, l2);
/* Graft it onto L4[511][511] */ /* Graft it onto L4[511][510] */
copy_page(level2_kernel_pgt, l2); copy_page(level2_kernel_pgt, l2);
/* Get [511][510] and graft that in level2_fixmap_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
copy_page(level2_fixmap_pgt, l2);
/* Note that we don't do anything with level1_fixmap_pgt which
* we don't need. */
if (!xen_feature(XENFEAT_auto_translated_physmap)) { if (!xen_feature(XENFEAT_auto_translated_physmap)) {
/* Make pagetable pieces RO */ /* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
...@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) ...@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
/* Pin down new L4 */ /* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
......
...@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit) ...@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit)
rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
if (rc) { if (rc) {
pr_info("%s: add_memory() failed: %i\n", __func__, rc); pr_warn("Cannot add additional memory (%i)\n", rc);
return BP_EAGAIN; return BP_ECANCELED;
} }
balloon_hotplug -= credit; balloon_hotplug -= credit;
......
...@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, ...@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
int i, rc, readonly; int i, rc, readonly;
LIST_HEAD(queue_gref); LIST_HEAD(queue_gref);
LIST_HEAD(queue_file); LIST_HEAD(queue_file);
struct gntalloc_gref *gref; struct gntalloc_gref *gref, *next;
readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
rc = -ENOMEM; rc = -ENOMEM;
...@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, ...@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
goto undo; goto undo;
/* Grant foreign access to the page. */ /* Grant foreign access to the page. */
gref->gref_id = gnttab_grant_foreign_access(op->domid, rc = gnttab_grant_foreign_access(op->domid,
pfn_to_mfn(page_to_pfn(gref->page)), readonly); pfn_to_mfn(page_to_pfn(gref->page)), readonly);
if ((int)gref->gref_id < 0) { if (rc < 0)
rc = gref->gref_id;
goto undo; goto undo;
} gref_ids[i] = gref->gref_id = rc;
gref_ids[i] = gref->gref_id;
} }
/* Add to gref lists. */ /* Add to gref lists. */
...@@ -162,8 +160,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, ...@@ -162,8 +160,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
mutex_lock(&gref_mutex); mutex_lock(&gref_mutex);
gref_size -= (op->count - i); gref_size -= (op->count - i);
list_for_each_entry(gref, &queue_file, next_file) { list_for_each_entry_safe(gref, next, &queue_file, next_file) {
/* __del_gref does not remove from queue_file */ list_del(&gref->next_file);
__del_gref(gref); __del_gref(gref);
} }
...@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref) ...@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref)
gref->notify.flags = 0; gref->notify.flags = 0;
if (gref->gref_id > 0) { if (gref->gref_id) {
if (gnttab_query_foreign_access(gref->gref_id)) if (gnttab_query_foreign_access(gref->gref_id))
return; return;
......
...@@ -103,16 +103,11 @@ static void do_suspend(void) ...@@ -103,16 +103,11 @@ static void do_suspend(void)
shutting_down = SHUTDOWN_SUSPEND; shutting_down = SHUTDOWN_SUSPEND;
#ifdef CONFIG_PREEMPT
/* If the kernel is preemptible, we need to freeze all the processes
to prevent them from being in the middle of a pagetable update
during suspend. */
err = freeze_processes(); err = freeze_processes();
if (err) { if (err) {
pr_err("%s: freeze failed %d\n", __func__, err); pr_err("%s: freeze failed %d\n", __func__, err);
goto out; goto out;
} }
#endif
err = dpm_suspend_start(PMSG_FREEZE); err = dpm_suspend_start(PMSG_FREEZE);
if (err) { if (err) {
...@@ -157,10 +152,8 @@ static void do_suspend(void) ...@@ -157,10 +152,8 @@ static void do_suspend(void)
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
out_thaw: out_thaw:
#ifdef CONFIG_PREEMPT
thaw_processes(); thaw_processes();
out: out:
#endif
shutting_down = SHUTDOWN_INVALID; shutting_down = SHUTDOWN_INVALID;
} }
#endif /* CONFIG_HIBERNATE_CALLBACKS */ #endif /* CONFIG_HIBERNATE_CALLBACKS */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment