Commit a60d4b98 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'stable/for-linus-3.12-rc0-tag-two' of...

Merge tag 'stable/for-linus-3.12-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bug-fixes from Konrad Rzeszutek Wilk:
 "This pull I usually do after rc1 is out but because we have a nice
  amount of fixes, some bootup related fixes for ARM, and it is early in
  the cycle we figured to do it now to help with tracking of potential
  regressions.

  The simple ones are the ARM ones - one of the patches fell through the
  cracks, other fixes a bootup issue (unconditionally using Xen
  functions).  Then a fix for a regression causing preempt count being
  off (patch causing this went in v3.12).

  Lastly are the fixes to make Xen PVHVM guests use PV ticketlocks (Xen
  PV already does).

  The enablement of that was supposed to be part of the x86 spinlock
  merge in commit 816434ec ("The biggest change here are
  paravirtualized ticket spinlocks (PV spinlocks), which bring a nice
  speedup on various benchmarks...") but unfortunately it would cause
  hang when booting Xen PVHVM guests.  Yours truly got all of the bugs
  fixed last week and they (six of them) are included in this pull.

  Bug-fixes:
   - Boot on ARM without using Xen unconditionally
   - On Xen ARM don't run cpuidle/cpufreq
   - Fix regression in balloon driver, preempt count warnings
   - Fixes to make PVHVM able to use pv ticketlock.
   - Revert Xen PVHVM disabling pv ticketlock (aka, re-enable pv ticketlocks)"

* tag 'stable/for-linus-3.12-rc0-tag-two' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/spinlock: Don't use __initdate for xen_pv_spin
  Revert "xen/spinlock: Disable IRQ spinlock (PV) allocation on PVHVM"
  xen/spinlock: Don't setup xen spinlock IPI kicker if disabled.
  xen/smp: Update pv_lock_ops functions before alternative code starts under PVHVM
  xen/spinlock: We don't need the old structure anymore
  xen/spinlock: Fix locking path engaging too soon under PVHVM.
  xen/arm: disable cpuidle and cpufreq when linux is running as dom0
  xen/p2m: Don't call get_balloon_scratch_page() twice, keep interrupts disabled for multicalls
  ARM: xen: only set pm function ptrs for Xen guests
parents fa1586a7 c3b7cb1f
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -267,18 +269,28 @@ static int __init xen_guest_init(void) ...@@ -267,18 +269,28 @@ static int __init xen_guest_init(void)
if (!xen_initial_domain()) if (!xen_initial_domain())
xenbus_probe(NULL); xenbus_probe(NULL);
/*
* Making sure board specific code will not set up ops for
* cpu idle and cpu freq.
*/
disable_cpuidle();
disable_cpufreq();
return 0; return 0;
} }
core_initcall(xen_guest_init); core_initcall(xen_guest_init);
static int __init xen_pm_init(void) static int __init xen_pm_init(void)
{ {
if (!xen_domain())
return -ENODEV;
pm_power_off = xen_power_off; pm_power_off = xen_power_off;
arm_pm_restart = xen_restart; arm_pm_restart = xen_restart;
return 0; return 0;
} }
subsys_initcall(xen_pm_init); late_initcall(xen_pm_init);
static irqreturn_t xen_arm_callback(int irq, void *arg) static irqreturn_t xen_arm_callback(int irq, void *arg)
{ {
......
...@@ -1692,7 +1692,6 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action, ...@@ -1692,7 +1692,6 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
case CPU_UP_PREPARE: case CPU_UP_PREPARE:
xen_vcpu_setup(cpu); xen_vcpu_setup(cpu);
if (xen_have_vector_callback) { if (xen_have_vector_callback) {
xen_init_lock_cpu(cpu);
if (xen_feature(XENFEAT_hvm_safe_pvclock)) if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu); xen_setup_timer(cpu);
} }
......
...@@ -990,10 +990,13 @@ int m2p_remove_override(struct page *page, ...@@ -990,10 +990,13 @@ int m2p_remove_override(struct page *page,
printk(KERN_WARNING "m2p_remove_override: " printk(KERN_WARNING "m2p_remove_override: "
"pfn %lx mfn %lx, failed to modify kernel mappings", "pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn); pfn, mfn);
put_balloon_scratch_page();
return -1; return -1;
} }
mcs = xen_mc_entry( xen_mc_batch();
mcs = __xen_mc_entry(
sizeof(struct gnttab_unmap_and_replace)); sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args; unmap_op = mcs.args;
unmap_op->host_addr = kmap_op->host_addr; unmap_op->host_addr = kmap_op->host_addr;
...@@ -1003,12 +1006,11 @@ int m2p_remove_override(struct page *page, ...@@ -1003,12 +1006,11 @@ int m2p_remove_override(struct page *page,
MULTI_grant_table_op(mcs.mc, MULTI_grant_table_op(mcs.mc,
GNTTABOP_unmap_and_replace, unmap_op, 1); GNTTABOP_unmap_and_replace, unmap_op, 1);
xen_mc_issue(PARAVIRT_LAZY_MMU);
mcs = __xen_mc_entry(0); mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, scratch_page_address, MULTI_update_va_mapping(mcs.mc, scratch_page_address,
pfn_pte(page_to_pfn(get_balloon_scratch_page()), pfn_pte(page_to_pfn(scratch_page),
PAGE_KERNEL_RO), 0); PAGE_KERNEL_RO), 0);
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(PARAVIRT_LAZY_MMU);
kmap_op->host_addr = 0; kmap_op->host_addr = 0;
......
...@@ -273,12 +273,20 @@ static void __init xen_smp_prepare_boot_cpu(void) ...@@ -273,12 +273,20 @@ static void __init xen_smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != 0); BUG_ON(smp_processor_id() != 0);
native_smp_prepare_boot_cpu(); native_smp_prepare_boot_cpu();
/* We've switched to the "real" per-cpu gdt, so make sure the if (xen_pv_domain()) {
old memory can be recycled */ /* We've switched to the "real" per-cpu gdt, so make sure the
make_lowmem_page_readwrite(xen_initial_gdt); old memory can be recycled */
make_lowmem_page_readwrite(xen_initial_gdt);
xen_filter_cpu_maps(); xen_filter_cpu_maps();
xen_setup_vcpu_info_placement(); xen_setup_vcpu_info_placement();
}
/*
* The alternative logic (which patches the unlock/lock) runs before
* the smp bootup up code is activated. Hence we need to set this up
* the core kernel is being patched. Otherwise we will have only
* modules patched but not core code.
*/
xen_init_spinlocks(); xen_init_spinlocks();
} }
...@@ -709,6 +717,15 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) ...@@ -709,6 +717,15 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
WARN_ON(rc); WARN_ON(rc);
if (!rc) if (!rc)
rc = native_cpu_up(cpu, tidle); rc = native_cpu_up(cpu, tidle);
/*
* We must initialize the slowpath CPU kicker _after_ the native
* path has executed. If we initialized it before none of the
* unlocker IPI kicks would reach the booting CPU as the booting
* CPU had not set itself 'online' in cpu_online_mask. That mask
* is checked when IPIs are sent (on HVM at least).
*/
xen_init_lock_cpu(cpu);
return rc; return rc;
} }
...@@ -728,4 +745,5 @@ void __init xen_hvm_smp_init(void) ...@@ -728,4 +745,5 @@ void __init xen_hvm_smp_init(void)
smp_ops.cpu_die = xen_hvm_cpu_die; smp_ops.cpu_die = xen_hvm_cpu_die;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi; smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi; smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
} }
...@@ -81,7 +81,6 @@ static inline void spin_time_accum_blocked(u64 start) ...@@ -81,7 +81,6 @@ static inline void spin_time_accum_blocked(u64 start)
spinlock_stats.time_blocked += delta; spinlock_stats.time_blocked += delta;
} }
#else /* !CONFIG_XEN_DEBUG_FS */ #else /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT (1 << 10)
static inline void add_stats(enum xen_contention_stat var, u32 val) static inline void add_stats(enum xen_contention_stat var, u32 val)
{ {
} }
...@@ -96,23 +95,6 @@ static inline void spin_time_accum_blocked(u64 start) ...@@ -96,23 +95,6 @@ static inline void spin_time_accum_blocked(u64 start)
} }
#endif /* CONFIG_XEN_DEBUG_FS */ #endif /* CONFIG_XEN_DEBUG_FS */
/*
* Size struct xen_spinlock so it's the same as arch_spinlock_t.
*/
#if NR_CPUS < 256
typedef u8 xen_spinners_t;
# define inc_spinners(xl) \
asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
# define dec_spinners(xl) \
asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
#else
typedef u16 xen_spinners_t;
# define inc_spinners(xl) \
asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
# define dec_spinners(xl) \
asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
#endif
struct xen_lock_waiting { struct xen_lock_waiting {
struct arch_spinlock *lock; struct arch_spinlock *lock;
__ticket_t want; __ticket_t want;
...@@ -123,6 +105,7 @@ static DEFINE_PER_CPU(char *, irq_name); ...@@ -123,6 +105,7 @@ static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting); static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus; static cpumask_t waiting_cpus;
static bool xen_pvspin = true;
static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{ {
int irq = __this_cpu_read(lock_kicker_irq); int irq = __this_cpu_read(lock_kicker_irq);
...@@ -241,16 +224,12 @@ void xen_init_lock_cpu(int cpu) ...@@ -241,16 +224,12 @@ void xen_init_lock_cpu(int cpu)
int irq; int irq;
char *name; char *name;
if (!xen_pvspin)
return;
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu)); cpu, per_cpu(lock_kicker_irq, cpu));
/*
* See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
* (xen: disable PV spinlocks on HVM)
*/
if (xen_hvm_domain())
return;
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu, cpu,
...@@ -270,11 +249,7 @@ void xen_init_lock_cpu(int cpu) ...@@ -270,11 +249,7 @@ void xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu) void xen_uninit_lock_cpu(int cpu)
{ {
/* if (!xen_pvspin)
* See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
* (xen: disable PV spinlocks on HVM)
*/
if (xen_hvm_domain())
return; return;
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
...@@ -283,16 +258,9 @@ void xen_uninit_lock_cpu(int cpu) ...@@ -283,16 +258,9 @@ void xen_uninit_lock_cpu(int cpu)
per_cpu(irq_name, cpu) = NULL; per_cpu(irq_name, cpu) = NULL;
} }
static bool xen_pvspin __initdata = true;
void __init xen_init_spinlocks(void) void __init xen_init_spinlocks(void)
{ {
/*
* See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
* (xen: disable PV spinlocks on HVM)
*/
if (xen_hvm_domain())
return;
if (!xen_pvspin) { if (!xen_pvspin) {
printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
...@@ -323,6 +291,9 @@ static int __init xen_spinlock_debugfs(void) ...@@ -323,6 +291,9 @@ static int __init xen_spinlock_debugfs(void)
if (d_xen == NULL) if (d_xen == NULL)
return -ENOMEM; return -ENOMEM;
if (!xen_pvspin)
return 0;
d_spin_debug = debugfs_create_dir("spinlocks", d_xen); d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats); debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment