Commit 87ab4689 authored by Chang S. Bae, committed by Thomas Gleixner

x86/fsgsbase/64: Fix the base write helper functions

Andy spotted a regression in the fs/gs base helpers after the patch series
was committed. The helper functions which write the fs/gs base do not just
write the base, they also change the index. That's wrong and needs to be
separated, because writing the base must not modify the index.

While the regression is not causing any harm right now because the only
caller depends on that behaviour, it's a guarantee for subtle breakage down
the road.

Let the caller change the index explicitly instead of burying that code in
the helpers.

As a consequence, the task write helpers no longer handle the current task.
The range check on the base value is also moved out of the helpers and into
the callers, which avoids redundant checks.
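
For illustration only (editorial note, not part of the patch): after this
change a caller that wants the old combined behaviour has to zero the index
itself and pick the helper that matches whether the task is the current one,
roughly along the lines of the new do_arch_prctl_64() code. A minimal kernel
fragment, where set_gsbase() is a hypothetical wrapper:

	/* Hypothetical wrapper; the helper names are taken from the patch. */
	static void set_gsbase(struct task_struct *task, unsigned long gsbase)
	{
		if (unlikely(gsbase >= TASK_SIZE_MAX))
			return;		/* range check is now the caller's job */

		preempt_disable();
		if (task == current) {
			loadseg(GS, 0);				/* caller zeroes the index */
			x86_gsbase_write_cpu_inactive(gsbase);	/* helper writes only the base */
			task->thread.gsbase = gsbase;
		} else {
			task->thread.gsindex = 0;		/* caller zeroes the index */
			x86_gsbase_write_task(task, gsbase);	/* helper stores only the base */
		}
		preempt_enable();
	}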

Fixes: b1378a56 ("x86/fsgsbase/64: Introduce FS/GS base helper functions")
Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Link: https://lkml.kernel.org/r/20181126195524.32179-1-chang.seok.bae@intel.com
parent 721066df
@@ -16,8 +16,8 @@
  */
 extern unsigned long x86_fsbase_read_task(struct task_struct *task);
 extern unsigned long x86_gsbase_read_task(struct task_struct *task);
-extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
-extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
 
 /* Helper functions for reading/writing FS/GS base */
 
@@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void)
 	return gsbase;
 }
 
-extern void x86_fsbase_write_cpu(unsigned long fsbase);
-extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+	wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
 
 #endif /* CONFIG_X86_64 */
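
For context (editorial note, not part of the patch): the new inline write
helpers mirror the read helpers already in this header, e.g.
x86_gsbase_read_cpu_inactive() above. A hypothetical fragment showing how
such a read/write pair could be used to swap the inactive GS base of the
current CPU:

	/* Hypothetical helper, only to illustrate how the accessors pair up. */
	static unsigned long xchg_inactive_gsbase(unsigned long new_base)
	{
		unsigned long old_base;

		preempt_disable();
		old_base = x86_gsbase_read_cpu_inactive();	/* rdmsrl(MSR_KERNEL_GS_BASE) */
		x86_gsbase_write_cpu_inactive(new_base);	/* wrmsrl(MSR_KERNEL_GS_BASE) */
		preempt_enable();

		return old_base;
	}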
@@ -339,24 +339,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
 	return base;
 }
 
-void x86_fsbase_write_cpu(unsigned long fsbase)
-{
-	/*
-	 * Set the selector to 0 as a notion, that the segment base is
-	 * overwritten, which will be checked for skipping the segment load
-	 * during context switch.
-	 */
-	loadseg(FS, 0);
-	wrmsrl(MSR_FS_BASE, fsbase);
-}
-
-void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
-{
-	/* Set the selector to 0 for the same reason as %fs above. */
-	loadseg(GS, 0);
-	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
-}
-
 unsigned long x86_fsbase_read_task(struct task_struct *task)
 {
 	unsigned long fsbase;
@@ -385,38 +367,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
 	return gsbase;
 }
 
-int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
 {
-	/*
-	 * Not strictly needed for %fs, but do it for symmetry
-	 * with %gs
-	 */
-	if (unlikely(fsbase >= TASK_SIZE_MAX))
-		return -EPERM;
+	WARN_ON_ONCE(task == current);
 
-	preempt_disable();
 	task->thread.fsbase = fsbase;
-	if (task == current)
-		x86_fsbase_write_cpu(fsbase);
-	task->thread.fsindex = 0;
-	preempt_enable();
-
-	return 0;
 }
 
-int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
 {
-	if (unlikely(gsbase >= TASK_SIZE_MAX))
-		return -EPERM;
+	WARN_ON_ONCE(task == current);
 
-	preempt_disable();
 	task->thread.gsbase = gsbase;
-	if (task == current)
-		x86_gsbase_write_cpu_inactive(gsbase);
-	task->thread.gsindex = 0;
-	preempt_enable();
-
-	return 0;
 }
 
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
@@ -754,11 +716,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 
 	switch (option) {
 	case ARCH_SET_GS: {
-		ret = x86_gsbase_write_task(task, arg2);
+		if (unlikely(arg2 >= TASK_SIZE_MAX))
+			return -EPERM;
+
+		preempt_disable();
+
+		/*
+		 * ARCH_SET_GS has always overwritten the index
+		 * and the base. Zero is the most sensible value
+		 * to put in the index, and is the only value that
+		 * makes any sense if FSGSBASE is unavailable.
+		 */
+		if (task == current) {
+			loadseg(GS, 0);
+			x86_gsbase_write_cpu_inactive(arg2);
+
+			/*
+			 * On non-FSGSBASE systems, save_base_legacy() expects
+			 * that we also fill in thread.gsbase.
+			 */
+			task->thread.gsbase = arg2;
+		} else {
+			task->thread.gsindex = 0;
+			x86_gsbase_write_task(task, arg2);
+		}
+		preempt_enable();
 		break;
 	}
 	case ARCH_SET_FS: {
-		ret = x86_fsbase_write_task(task, arg2);
+		/*
+		 * Not strictly needed for %fs, but do it for symmetry
+		 * with %gs
+		 */
+		if (unlikely(arg2 >= TASK_SIZE_MAX))
+			return -EPERM;
+
+		preempt_disable();
+
+		/*
+		 * Set the selector to 0 for the same reason
+		 * as %gs above.
+		 */
+		if (task == current) {
+			loadseg(FS, 0);
+			x86_fsbase_write_cpu(arg2);
+
+			/*
+			 * On non-FSGSBASE systems, save_base_legacy() expects
+			 * that we also fill in thread.fsbase.
+			 */
+			task->thread.fsbase = arg2;
+		} else {
+			task->thread.fsindex = 0;
+			x86_fsbase_write_task(task, arg2);
+		}
+		preempt_enable();
 		break;
 	}
 	case ARCH_GET_FS: {
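
Not part of the patch: the reworked ARCH_SET_GS/ARCH_GET_GS paths above can
be exercised from user space with the arch_prctl() syscall. A small test
sketch, assuming an x86-64 Linux system with <asm/prctl.h> available (error
handling kept minimal):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */

	int main(void)
	{
		unsigned long want = (unsigned long)malloc(4096);	/* any valid user address */
		unsigned long got = 0;

		/* Set the GS base; the kernel now also zeroes the GS index here. */
		if (syscall(SYS_arch_prctl, ARCH_SET_GS, want))
			perror("ARCH_SET_GS");

		/* Read it back and compare. */
		if (syscall(SYS_arch_prctl, ARCH_GET_GS, &got))
			perror("ARCH_GET_GS");

		printf("gsbase set to %#lx, read back %#lx\n", want, got);
		return got == want ? 0 : 1;
	}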
@@ -397,11 +397,12 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		/*
-		 * When changing the FS base, use the same
-		 * mechanism as for do_arch_prctl_64().
+		 * When changing the FS base, use do_arch_prctl_64()
+		 * to set the index to zero and to set the base
+		 * as requested.
 		 */
 		if (child->thread.fsbase != value)
-			return x86_fsbase_write_task(child, value);
+			return do_arch_prctl_64(child, ARCH_SET_FS, value);
 		return 0;
 	case offsetof(struct user_regs_struct,gs_base):
 		/*
@@ -410,7 +411,7 @@ static int putreg(struct task_struct *child,
 		if (value >= TASK_SIZE_MAX)
 			return -EIO;
 		if (child->thread.gsbase != value)
-			return x86_gsbase_write_task(child, value);
+			return do_arch_prctl_64(child, ARCH_SET_GS, value);
 		return 0;
 #endif
 	}
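
Not part of the patch: the ptrace() path above (putreg() calling
do_arch_prctl_64()) can be exercised with PTRACE_POKEUSER on the fs_base
and gs_base slots of struct user_regs_struct. A rough tracer sketch for
x86-64, with error handling kept minimal:

	#include <stdio.h>
	#include <stddef.h>
	#include <signal.h>
	#include <sys/ptrace.h>
	#include <sys/user.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t child = fork();

		if (child == 0) {
			/* Tracee: stop so the parent can poke registers. */
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);
			_exit(0);
		}

		waitpid(child, NULL, 0);

		/* Writing gs_base now goes through do_arch_prctl_64(child, ARCH_SET_GS, ...). */
		if (ptrace(PTRACE_POKEUSER, child,
			   (void *)offsetof(struct user_regs_struct, gs_base),
			   (void *)0x1000) == -1)
			perror("PTRACE_POKEUSER gs_base");

		long base = ptrace(PTRACE_PEEKUSER, child,
				   (void *)offsetof(struct user_regs_struct, gs_base), NULL);
		printf("child gs_base = %#lx\n", (unsigned long)base);

		ptrace(PTRACE_CONT, child, NULL, NULL);
		waitpid(child, NULL, 0);
		return 0;
	}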