Commit b7ceaec1 authored by Andy Lutomirski's avatar Andy Lutomirski Committed by Radim Krčmář

x86/asm: Tidy up TSS limit code

In an earlier version of the patch ("x86/kvm/vmx: Defer TR reload
after VM exit") that introduced TSS limit validity tracking, I
confused which helper was which.  On reflection, the names I chose
sucked.  Rename the helpers to make it more obvious what's going on
and add some comments.

While I'm at it, clear __tss_limit_invalid when force-reloading as
well as when conditionally reloading, since any TR reload fixes the
limit.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent e3736c3e
...@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void) ...@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
} }
DECLARE_PER_CPU(bool, __tss_limit_invalid);
static inline void force_reload_TR(void) static inline void force_reload_TR(void)
{ {
struct desc_struct *d = get_cpu_gdt_table(smp_processor_id()); struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
...@@ -220,18 +222,20 @@ static inline void force_reload_TR(void) ...@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS); write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
load_TR_desc(); load_TR_desc();
this_cpu_write(__tss_limit_invalid, false);
} }
DECLARE_PER_CPU(bool, need_tr_refresh); /*
* Call this if you need the TSS limit to be correct, which should be the case
static inline void refresh_TR(void) * if and only if you have TIF_IO_BITMAP set or you're switching to a task
* with TIF_IO_BITMAP set.
*/
static inline void refresh_tss_limit(void)
{ {
DEBUG_LOCKS_WARN_ON(preemptible()); DEBUG_LOCKS_WARN_ON(preemptible());
if (unlikely(this_cpu_read(need_tr_refresh))) { if (unlikely(this_cpu_read(__tss_limit_invalid)))
force_reload_TR(); force_reload_TR();
this_cpu_write(need_tr_refresh, false);
}
} }
/* /*
...@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void) ...@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
if (unlikely(test_thread_flag(TIF_IO_BITMAP))) if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
force_reload_TR(); force_reload_TR();
else else
this_cpu_write(need_tr_refresh, true); this_cpu_write(__tss_limit_invalid, true);
} }
static inline void native_load_gdt(const struct desc_ptr *dtr) static inline void native_load_gdt(const struct desc_ptr *dtr)
......
...@@ -47,8 +47,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) ...@@ -47,8 +47,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
t->io_bitmap_ptr = bitmap; t->io_bitmap_ptr = bitmap;
set_thread_flag(TIF_IO_BITMAP); set_thread_flag(TIF_IO_BITMAP);
/*
* Now that we have an IO bitmap, we need our TSS limit to be
* correct. It's fine if we are preempted after doing this:
* with TIF_IO_BITMAP set, context switches will keep our TSS
* limit correct.
*/
preempt_disable(); preempt_disable();
refresh_TR(); refresh_tss_limit();
preempt_enable(); preempt_enable();
} }
......
...@@ -65,8 +65,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { ...@@ -65,8 +65,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
}; };
EXPORT_PER_CPU_SYMBOL(cpu_tss); EXPORT_PER_CPU_SYMBOL(cpu_tss);
DEFINE_PER_CPU(bool, need_tr_refresh); DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh); EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/* /*
* this gets called so that we can store lazy state into memory and copy the * this gets called so that we can store lazy state into memory and copy the
...@@ -218,7 +218,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, ...@@ -218,7 +218,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
* Make sure that the TSS limit is correct for the CPU * Make sure that the TSS limit is correct for the CPU
* to notice the IO bitmap. * to notice the IO bitmap.
*/ */
refresh_TR(); refresh_tss_limit();
} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) { } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
/* /*
* Clear any possible leftover bits: * Clear any possible leftover bits:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment