Commit 48a60bdb authored by Linus Torvalds

Merge tag 'core_entry_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull thread_info flag accessor helper updates from Borislav Petkov:
 "Add a set of thread_info.flags accessors which snapshot it before
  accessing it in order to prevent any potential data races, and convert
  all users to those new accessors"

* tag 'core_entry_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  powerpc: Snapshot thread flags
  powerpc: Avoid discarding flags in system_call_exception()
  openrisc: Snapshot thread flags
  microblaze: Snapshot thread flags
  arm64: Snapshot thread flags
  ARM: Snapshot thread flags
  alpha: Snapshot thread flags
  sched: Snapshot thread flags
  entry: Snapshot thread flags
  x86: Snapshot thread flags
  thread_info: Add helpers to snapshot thread flags
parents 5ba13c1c 985faa78
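In short, the new helpers read thread_info.flags exactly once via READ_ONCE() (their definitions are the added lines in the include/linux/thread_info.h hunk below), and every open-coded read across the architectures is converted to them. A minimal sketch of the conversion pattern follows; read_thread_flags(), do_notify_resume() and the TIF constants exist in the kernel, while example_do_work_pending() is a hypothetical caller invented for illustration:

static void example_do_work_pending(struct pt_regs *regs)
{
	unsigned long flags;

	/* old style: flags = current_thread_info()->flags;            */
	/* old style: flags = READ_ONCE(current_thread_info()->flags); */
	flags = read_thread_flags();	/* new style: one snapshot of the flags word */

	if (flags & _TIF_WORK_MASK)		/* arch-defined work mask */
		do_notify_resume(regs, flags);	/* arch-specific handler, as in the arm/arm64 hunks */
}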
@@ -535,6 +535,6 @@ do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
}
}
local_irq_disable();
-thread_flags = current_thread_info()->flags;
+thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
}
@@ -631,7 +631,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
}
}
local_irq_disable();
-thread_flags = current_thread_info()->flags;
+thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
......
@@ -990,7 +990,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* there is no work pending for this thread.
*/
raw_local_irq_disable();
-if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+if (!(read_thread_flags() & _TIF_WORK_MASK))
set_cr(cr_no_alignment);
}
......
@@ -129,7 +129,7 @@ static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
local_daif_mask();
-flags = READ_ONCE(current_thread_info()->flags);
+flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
}
......
@@ -1839,7 +1839,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
int syscall_trace_enter(struct pt_regs *regs)
{
-unsigned long flags = READ_ONCE(current_thread_info()->flags);
+unsigned long flags = read_thread_flags();
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
@@ -1862,7 +1862,7 @@ int syscall_trace_enter(struct pt_regs *regs)
void syscall_trace_exit(struct pt_regs *regs)
{
-unsigned long flags = READ_ONCE(current_thread_info()->flags);
+unsigned long flags = read_thread_flags();
audit_syscall_exit(regs);
......
@@ -948,7 +948,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
}
local_daif_mask();
-thread_flags = READ_ONCE(current_thread_info()->flags);
+thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
}
......
@@ -81,7 +81,7 @@ void syscall_trace_exit(struct pt_regs *regs);
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
-unsigned long flags = current_thread_info()->flags;
+unsigned long flags = read_thread_flags();
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
@@ -148,7 +148,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
*/
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
-flags = current_thread_info()->flags;
+flags = read_thread_flags();
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
local_daif_restore(DAIF_PROCCTX);
......
@@ -283,7 +283,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
#ifdef DEBUG_SIG
pr_info("do signal: %p %d\n", regs, in_syscall);
pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
-regs->r12, current_thread_info()->flags);
+regs->r12, read_thread_flags());
#endif
if (get_signal(&ksig)) {
......
@@ -313,7 +313,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
}
}
local_irq_disable();
-thread_flags = current_thread_info()->flags;
+thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
@@ -148,7 +148,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
-current_thread_info()->flags |= _TIF_RESTOREALL;
+set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
/*
* If the system call was made with a transaction active, doom it and
@@ -181,7 +181,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
local_irq_enable();
-if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
+if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
if (unlikely(trap_is_unsupported_scv(regs))) {
/* Unsupported scv vector */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -343,7 +343,7 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
unsigned long ti_flags;
again:
-ti_flags = READ_ONCE(current_thread_info()->flags);
+ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
if (ti_flags & _TIF_NEED_RESCHED) {
@@ -359,7 +359,7 @@ interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
do_notify_resume(regs, ti_flags);
}
local_irq_disable();
-ti_flags = READ_ONCE(current_thread_info()->flags);
+ti_flags = read_thread_flags();
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -437,7 +437,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
/* Check whether the syscall is issued inside a restartable sequence */
rseq_syscall(regs);
-ti_flags = current_thread_info()->flags;
+ti_flags = read_thread_flags();
if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
@@ -532,8 +532,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
unsigned long flags;
unsigned long ret = 0;
unsigned long kuap;
-bool stack_store = current_thread_info()->flags &
-_TIF_EMULATE_STACK_STORE;
+bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
if (regs_is_unrecoverable(regs))
unrecoverable_exception(regs);
@@ -554,7 +553,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
again:
if (IS_ENABLED(CONFIG_PREEMPT)) {
/* Return to preemptible kernel context */
-if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
+if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
preempt_schedule_irq();
}
......
@@ -260,8 +260,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
u32 flags;
-flags = READ_ONCE(current_thread_info()->flags) &
-(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
+flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
if (flags) {
int rc = tracehook_report_syscall_entry(regs);
......
@@ -365,7 +365,7 @@ void arch_setup_new_exec(void)
clear_thread_flag(TIF_SSBD);
task_clear_spec_ssb_disable(current);
task_clear_spec_ssb_noexec(current);
-speculation_ctrl_update(task_thread_info(current)->flags);
+speculation_ctrl_update(read_thread_flags());
}
}
@@ -617,7 +617,7 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
}
/* Return the updated threadinfo flags*/
-return task_thread_info(tsk)->flags;
+return read_task_thread_flags(tsk);
}
void speculation_ctrl_update(unsigned long tif)
@@ -653,8 +653,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
unsigned long tifp, tifn;
-tifn = READ_ONCE(task_thread_info(next_p)->flags);
-tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+tifn = read_task_thread_flags(next_p);
+tifp = read_task_thread_flags(prev_p);
switch_to_bitmap(tifp);
......
@@ -13,8 +13,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
static inline void switch_to_extra(struct task_struct *prev,
struct task_struct *next)
{
-unsigned long next_tif = task_thread_info(next)->flags;
-unsigned long prev_tif = task_thread_info(prev)->flags;
+unsigned long next_tif = read_task_thread_flags(next);
+unsigned long prev_tif = read_task_thread_flags(prev);
if (IS_ENABLED(CONFIG_SMP)) {
/*
......
@@ -361,7 +361,7 @@ static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
{
-unsigned long next_tif = task_thread_info(next)->flags;
+unsigned long next_tif = read_task_thread_flags(next);
unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
/*
......
@@ -75,7 +75,7 @@ static inline void xfer_to_guest_mode_prepare(void)
*/
static inline bool __xfer_to_guest_mode_work_pending(void)
{
-unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+unsigned long ti_work = read_thread_flags();
return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}
......
@@ -118,6 +118,15 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
return test_bit(flag, (unsigned long *)&ti->flags);
}
+/*
+* This may be used in noinstr code, and needs to be __always_inline to prevent
+* inadvertent instrumentation.
+*/
+static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
+{
+return READ_ONCE(ti->flags);
+}
#define set_thread_flag(flag) \
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
@@ -130,6 +139,11 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
+#define read_thread_flags() \
+read_ti_thread_flags(current_thread_info())
+#define read_task_thread_flags(t) \
+read_ti_thread_flags(task_thread_info(t))
#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
......
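The helper and the two macros added above are what make the "prevent any potential data races" wording in the pull message concrete: a plain current_thread_info()->flags access may be reloaded (or torn) by the compiler, so two tests in the same function can observe different values while another CPU sets or clears bits, which is the kind of access data-race checkers such as KCSAN report. A hedged sketch of the difference, using illustrative functions that are not part of this merge:

static void racy_checks(void)
{
	/* two independent plain loads; the value can change between them */
	if ((current_thread_info()->flags & _TIF_SIGPENDING) &&
	    (current_thread_info()->flags & _TIF_NEED_RESCHED))
		handle_both();	/* hypothetical; may act on an inconsistent view */
}

static void snapshotted_checks(void)
{
	unsigned long flags = read_thread_flags();	/* exactly one READ_ONCE() load */

	if ((flags & _TIF_SIGPENDING) && (flags & _TIF_NEED_RESCHED))
		handle_both();	/* hypothetical; both tests see the same snapshot */
}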
@@ -187,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
/* Check if any of the above work has queued a deferred wakeup */
tick_nohz_user_enter_prepare();
-ti_work = READ_ONCE(current_thread_info()->flags);
+ti_work = read_thread_flags();
}
/* Return the latest work state for arch_exit_to_user_mode() */
@@ -196,7 +196,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
-unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+unsigned long ti_work = read_thread_flags();
lockdep_assert_irqs_disabled();
......
@@ -26,7 +26,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
if (ret)
return ret;
-ti_work = READ_ONCE(current_thread_info()->flags);
+ti_work = read_thread_flags();
} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
return 0;
}
@@ -43,7 +43,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
* disabled in the inner loop before going into guest mode. No need
* to disable interrupts here.
*/
-ti_work = READ_ONCE(current_thread_info()->flags);
+ti_work = read_thread_flags();
if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
return 0;
......
@@ -8520,7 +8520,7 @@ void sched_show_task(struct task_struct *p)
rcu_read_unlock();
pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
free, task_pid_nr(p), ppid,
-(unsigned long)task_thread_info(p)->flags);
+read_task_thread_flags(p));
print_worker_info(KERN_INFO, p);
print_stop_info(KERN_INFO, p);
......