Commit ef79970d authored by Tony Luck, committed by Thomas Gleixner

x86/split-lock: Remove unused TIF_SLD bit

Changes to the "warn" mode of split lock handling mean that TIF_SLD is
never set.

Remove the bit and the functions that use it.
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220310204854.31752-3-tony.luck@intel.com
parent b041b525
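For context, here is a minimal stand-alone C sketch (not kernel code) of the pattern the diff below removes: a per-task thread_info flag whose change across a context switch triggers an MSR update for the incoming task. The real sld_update_msr() is stubbed with a printf, and context_switch_hook() is a hypothetical stand-in for the removed test in __switch_to_xtra(). Because the reworked "warn" mode never sets TIF_SLD, the XOR test is always false and the hook is dead code.

#include <stdbool.h>
#include <stdio.h>

/* Flag bit and mask, mirroring the definitions removed from thread_info.h. */
#define TIF_SLD   18
#define _TIF_SLD  (1UL << TIF_SLD)

/* Stub standing in for the kernel's sld_update_msr(). */
static void sld_update_msr(bool on)
{
	printf("split-lock detection %s\n", on ? "enabled" : "disabled");
}

/* Mirrors the removed switch_to_sld(): program the MSR for the incoming task. */
static void switch_to_sld(unsigned long tifn)
{
	sld_update_msr(!(tifn & _TIF_SLD));
}

/* Mirrors the removed test in __switch_to_xtra(): act only when the flag differs. */
static void context_switch_hook(unsigned long tifp, unsigned long tifn)
{
	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}

int main(void)
{
	context_switch_hook(0, _TIF_SLD);  /* next task has TIF_SLD set: detection switched off */
	context_switch_hook(0, 0);         /* flag never set any more: hook does nothing */
	return 0;
}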
@@ -43,14 +43,12 @@ unsigned int x86_model(unsigned int sig);
 unsigned int x86_stepping(unsigned int sig);
 #ifdef CONFIG_CPU_SUP_INTEL
 extern void __init sld_setup(struct cpuinfo_x86 *c);
-extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
 extern bool handle_guest_split_lock(unsigned long ip);
 extern void handle_bus_lock(struct pt_regs *regs);
 u8 get_this_hybrid_cpu_type(void);
 #else
 static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
-static inline void switch_to_sld(unsigned long tifn) {}
 static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
 	return false;
...
@@ -92,7 +92,6 @@ struct thread_info {
 #define TIF_NOCPUID		15	/* CPUID is not accessible in userland */
 #define TIF_NOTSC		16	/* TSC is not accessible in userland */
 #define TIF_NOTIFY_SIGNAL	17	/* signal notifications exist */
-#define TIF_SLD			18	/* Restore split lock detection on context switch */
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
@@ -116,7 +115,6 @@ struct thread_info {
 #define _TIF_NOCPUID		(1 << TIF_NOCPUID)
 #define _TIF_NOTSC		(1 << TIF_NOTSC)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
-#define _TIF_SLD		(1 << TIF_SLD)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
 #define _TIF_SPEC_FORCE_UPDATE	(1 << TIF_SPEC_FORCE_UPDATE)
@@ -128,7 +126,7 @@ struct thread_info {
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW_BASE					\
 	(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP |		\
-	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE | _TIF_SLD)
+	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
 
 /*
  * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
...
@@ -1232,18 +1232,6 @@ void handle_bus_lock(struct pt_regs *regs)
 	}
 }
 
-/*
- * This function is called only when switching between tasks with
- * different split-lock detection modes. It sets the MSR for the
- * mode of the new task. This is right most of the time, but since
- * the MSR is shared by hyperthreads on a physical core there can
- * be glitches when the two threads need different modes.
- */
-void switch_to_sld(unsigned long tifn)
-{
-	sld_update_msr(!(tifn & _TIF_SLD));
-}
-
 /*
  * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
  * only be trusted if it is confirmed that a CPU model implements a
...
@@ -686,9 +686,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 		/* Enforce MSR update to ensure consistent state */
 		__speculation_ctrl_update(~tifn, tifn);
 	}
-
-	if ((tifp ^ tifn) & _TIF_SLD)
-		switch_to_sld(tifn);
 }
 
 /*
...