Commit 511ba86e authored by Boris Ostrovsky, committed by H. Peter Anvin

x86, mm: Patch out arch_flush_lazy_mmu_mode() when running on bare metal

Invoking arch_flush_lazy_mmu_mode() results in calls to
preempt_enable()/disable(), which may have a performance impact.

Since lazy MMU mode is not used on bare metal, we can patch away
arch_flush_lazy_mmu_mode() so that it is never called in such an
environment.
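
For illustration, a minimal user-space sketch (not the kernel code itself)
of the mechanism this patch relies on: arch_flush_lazy_mmu_mode() becomes an
indirect call through the new pv_mmu_ops.lazy_mode.flush hook. Bare metal
points that hook at paravirt_nop (and the paravirt patching machinery can
then remove the call site entirely), while Xen and lguest install
paravirt_flush_lazy_mmu(). The names below mirror the kernel's but are
simplified stand-ins, not the in-tree implementation.

/*
 * Simplified model of the paravirt indirection, as plain C.
 * Compile and run stand-alone; names echo the kernel's for clarity only.
 */
#include <stdio.h>

enum lazy_mode { LAZY_NONE, LAZY_MMU };
static enum lazy_mode cur_mode = LAZY_NONE;

static void nop(void) { }                /* bare metal: the hook does nothing */

static void flush_lazy_mmu(void)         /* hypervisor backend: real flush */
{
        if (cur_mode == LAZY_MMU)
                printf("flushing batched MMU updates\n");
}

struct pv_lazy_ops {
        void (*enter)(void);
        void (*leave)(void);
        void (*flush)(void);             /* the hook this patch introduces */
};

/* On bare metal every hook is a nop; the kernel goes further and
 * patches the (now inline) call sites away at boot. */
static struct pv_lazy_ops lazy_ops = { nop, nop, nop };

static void arch_flush_lazy_mmu_mode(void)
{
        lazy_ops.flush();                /* ~ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush) */
}

int main(void)
{
        arch_flush_lazy_mmu_mode();      /* no-op: "bare metal" */

        lazy_ops.flush = flush_lazy_mmu; /* pretend a hypervisor installed its hook */
        cur_mode = LAZY_MMU;
        arch_flush_lazy_mmu_mode();      /* now performs the flush */
        return 0;
}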

[ hpa: the previous patch "Fix vmalloc_fault oops during lazy MMU
  updates" may cause a minor performance regression on
  bare metal.  This patch resolves that performance regression.  It is
  somewhat unclear to me if this is a good -stable candidate. ]
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: http://lkml.kernel.org/r/1364045796-10720-2-git-send-email-konrad.wilk@oracle.com
Tested-by: Josh Boyer <jwboyer@redhat.com>
Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Cc: <stable@vger.kernel.org> SEE NOTE ABOVE
parent 1160c277
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				phys_addr_t phys, pgprot_t flags)
...
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
 	/* Set deferred update mode, used for batching operations. */
 	void (*enter)(void);
 	void (*leave)(void);
+	void (*flush)(void);
 };
 
 struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
 
 void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
 u32 _paravirt_ident_32(u32);
...
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
 	leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
+void paravirt_flush_lazy_mmu(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
 void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return this_cpu_read(paravirt_lazy_mode);
 }
 
-void arch_flush_lazy_mmu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.lazy_mode = {
 		.enter = paravirt_nop,
 		.leave = paravirt_nop,
+		.flush = paravirt_nop,
 	},
 
 	.set_fixmap = native_set_fixmap,
...
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
 	pv_mmu_ops.read_cr3 = lguest_read_cr3;
 	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
 	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
 	pv_mmu_ops.pte_update = lguest_pte_update;
 	pv_mmu_ops.pte_update_defer = lguest_pte_update;
...
@@ -2196,6 +2196,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.lazy_mode = {
 		.enter = paravirt_enter_lazy_mmu,
 		.leave = xen_leave_lazy_mmu,
+		.flush = paravirt_flush_lazy_mmu,
 	},
 
 	.set_fixmap = xen_set_fixmap,
...