Commit 4012e77a authored by Andy Lutomirski, committed by Thomas Gleixner

x86/nmi: Fix NMI uaccess race against CR3 switching


An NMI can hit in the middle of context switching or in the middle of
switch_mm_irqs_off().  In either case, CR3 might not match current->mm,
which could cause copy_from_user_nmi() and friends to read the wrong
memory.

Fix it by adding a new nmi_uaccess_okay() helper and checking it in
copy_from_user_nmi() and in __copy_from_user_nmi()'s callers.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rik van Riel <riel@surriel.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jann Horn <jannh@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/dd956eba16646fd0b15c3c0741269dfd84452dac.1535557289.git.luto@kernel.org
parent 829fe4aa
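
In outline, switch_mm_irqs_off() now publishes the sentinel LOADED_MM_SWITCHING in cpu_tlbstate.loaded_mm before it writes CR3, and NMI-context user accessors refuse to touch user memory unless loaded_mm matches current->mm. Below is a minimal userspace model of that ordering protocol, not kernel code: struct mm, MM_SWITCHING, percpu_loaded_mm, hw_cr3 and the model_* functions are all invented stand-ins, and C11 atomics take the place of the kernel's barrier()-separated plain stores.

/* Build with: cc -std=c11 -Wall mm_switch_model.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm { int pgd; };                         /* stand-in for struct mm_struct */

#define MM_SWITCHING ((struct mm *)1)           /* models LOADED_MM_SWITCHING */

static _Atomic(struct mm *) percpu_loaded_mm;   /* models cpu_tlbstate.loaded_mm */
static _Atomic(int) hw_cr3;                     /* models the CR3 register */
static struct mm *current_mm;                   /* models current->mm */

/* Models nmi_uaccess_okay(): trust CR3 only if loaded_mm matches current->mm. */
static bool model_nmi_uaccess_okay(void)
{
        struct mm *loaded = atomic_load(&percpu_loaded_mm);

        /* Covers both the MM_SWITCHING window and a foreign or lazy mm. */
        return loaded == current_mm;
}

/* Models the switch_mm_irqs_off() ordering: sentinel, then CR3, then new mm. */
static void model_switch_mm(struct mm *next)
{
        atomic_store(&percpu_loaded_mm, MM_SWITCHING);  /* announce the switch */

        /* An NMI landing here sees the sentinel and refuses user accesses. */
        printf("mid switch:    uaccess ok? %d\n", model_nmi_uaccess_okay());

        atomic_store(&hw_cr3, next->pgd);               /* "write CR3" */
        atomic_store(&percpu_loaded_mm, next);          /* publish the new mm */
        current_mm = next;
}

int main(void)
{
        struct mm a = { .pgd = 0xa }, b = { .pgd = 0xb };

        current_mm = &a;
        atomic_store(&percpu_loaded_mm, &a);
        atomic_store(&hw_cr3, a.pgd);

        printf("before switch: uaccess ok? %d\n", model_nmi_uaccess_okay());
        model_switch_mm(&b);
        printf("after switch:  uaccess ok? %d\n", model_nmi_uaccess_okay());
        return 0;
}

The mid-switch probe prints 0 while the sentinel is visible, which is exactly the window nmi_uaccess_okay() is meant to reject; in the kernel a plain barrier() suffices because the observer is an NMI on the same CPU, so only compiler reordering matters.
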
arch/x86/events/core.c
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
         perf_callchain_store(entry, regs->ip);
 
-        if (!current->mm)
+        if (!nmi_uaccess_okay())
                 return;
 
         if (perf_callchain_user32(regs, entry))
arch/x86/include/asm/tlbflush.h
@@ -175,8 +175,16 @@ struct tlb_state {
          * are on. This means that it may not match current->active_mm,
          * which will contain the previous user mm when we're in lazy TLB
          * mode even if we've already switched back to swapper_pg_dir.
+         *
+         * During switch_mm_irqs_off(), loaded_mm will be set to
+         * LOADED_MM_SWITCHING during the brief interrupts-off window
+         * when CR3 and loaded_mm would otherwise be inconsistent. This
+         * is for nmi_uaccess_okay()'s benefit.
          */
         struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
         u16 loaded_mm_asid;
         u16 next_asid;
         /* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm. It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+        struct mm_struct *current_mm = current->mm;
+
+        VM_WARN_ON_ONCE(!loaded_mm);
+
+        /*
+         * The condition we want to check is
+         * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
+         * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+         * is supposed to be reasonably fast.
+         *
+         * Instead, we check the almost equivalent but somewhat conservative
+         * condition below, and we rely on the fact that switch_mm_irqs_off()
+         * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+         */
+        if (loaded_mm != current_mm)
+                return false;
+
+        VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+        return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
 #include <linux/uaccess.h>
 #include <linux/export.h>
 
+#include <asm/tlbflush.h>
+
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
         if (__range_not_ok(from, n, TASK_SIZE))
                 return n;
 
+        if (!nmi_uaccess_okay())
+                return n;
+
         /*
          * Even though this function is typically called from NMI/IRQ context
          * disable pagefaults so that its behaviour is consistent even when
arch/x86/mm/tlb.c
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
                 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
 
+                /* Let nmi_uaccess_okay() know that we're changing CR3. */
+                this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+                barrier();
+
                 if (need_flush) {
                         this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                         this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         if (next != &init_mm)
                 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
 
+        /* Make sure we write CR3 before loaded_mm. */
+        barrier();
+
         this_cpu_write(cpu_tlbstate.loaded_mm, next);
         this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 }
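
As a usage note, NMI-context code that reads user memory ends up following roughly the pattern below. This is a kernel-style sketch, not code from the patch, and it does not compile on its own: read_user_word() and its parameters are hypothetical, while nmi_uaccess_okay() and copy_from_user_nmi() are the real helpers touched above. copy_from_user_nmi() returns the number of bytes it could not copy, and after this patch it performs the nmi_uaccess_okay() check itself, so the explicit check mainly matters for code that reaches user memory by other means.

/* Hypothetical NMI-context reader; only the two helpers above are real. */
static void read_user_word(unsigned long *dst, const void __user *src)
{
        if (!nmi_uaccess_okay())        /* CR3 may not match current->mm */
                return;

        if (copy_from_user_nmi(dst, src, sizeof(*dst)))
                return;                 /* non-zero: some bytes were not copied */

        /* *dst now holds a word read from the interrupted task's user memory. */
}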