Commit f8818559 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86-urgent-2020-09-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "Two fixes for the x86 interrupt code:

   - Unbreak the magic 'search the timer interrupt' logic in IO/APIC
     code which got wrecked when the core interrupt code made the
     state tracking logic stricter.

     That caused the interrupt line to stay masked after switching from
     IO/APIC to PIC delivery mode, which obviously prevents interrupts
     from being delivered.

   - Make run_on_irqstack_cond() typesafe. The function argument is a
     void pointer which is then cast to 'void (*fun)(void *)'.

     This breaks Control Flow Integrity checking in clang. Use proper
      helper functions for the three variants required"

* tag 'x86-urgent-2020-09-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/ioapic: Unbreak check_timer()
  x86/irq: Make run_on_irqstack_cond() typesafe
parents ba25f057 86a82ae0
...@@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) ...@@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
old_regs = set_irq_regs(regs); old_regs = set_irq_regs(regs);
instrumentation_begin(); instrumentation_begin();
run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs); run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
instrumentation_begin(); instrumentation_begin();
set_irq_regs(old_regs); set_irq_regs(old_regs);
......
...@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs) ...@@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs)
* rdx: Function argument (can be NULL if none) * rdx: Function argument (can be NULL if none)
*/ */
SYM_FUNC_START(asm_call_on_stack) SYM_FUNC_START(asm_call_on_stack)
SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
/* /*
* Save the frame pointer unconditionally. This allows the ORC * Save the frame pointer unconditionally. This allows the ORC
* unwinder to handle the stack switch. * unwinder to handle the stack switch.
......
...@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \ ...@@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \
instrumentation_begin(); \ instrumentation_begin(); \
irq_enter_rcu(); \ irq_enter_rcu(); \
kvm_set_cpu_l1tf_flush_l1d(); \ kvm_set_cpu_l1tf_flush_l1d(); \
run_on_irqstack_cond(__##func, regs, regs); \ run_sysvec_on_irqstack_cond(__##func, regs); \
irq_exit_rcu(); \ irq_exit_rcu(); \
instrumentation_end(); \ instrumentation_end(); \
irqentry_exit(regs, state); \ irqentry_exit(regs, state); \
......
...@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void) ...@@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void)
return __this_cpu_read(irq_count) != -1; return __this_cpu_read(irq_count) != -1;
} }
void asm_call_on_stack(void *sp, void *func, void *arg); void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs),
struct pt_regs *regs);
void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
struct irq_desc *desc);
static __always_inline void __run_on_irqstack(void *func, void *arg) static __always_inline void __run_on_irqstack(void (*func)(void))
{ {
void *tos = __this_cpu_read(hardirq_stack_ptr); void *tos = __this_cpu_read(hardirq_stack_ptr);
__this_cpu_add(irq_count, 1); __this_cpu_add(irq_count, 1);
asm_call_on_stack(tos - 8, func, arg); asm_call_on_stack(tos - 8, func, NULL);
__this_cpu_sub(irq_count, 1);
}
static __always_inline void
__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
struct pt_regs *regs)
{
void *tos = __this_cpu_read(hardirq_stack_ptr);
__this_cpu_add(irq_count, 1);
asm_call_sysvec_on_stack(tos - 8, func, regs);
__this_cpu_sub(irq_count, 1);
}
static __always_inline void
__run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
struct irq_desc *desc)
{
void *tos = __this_cpu_read(hardirq_stack_ptr);
__this_cpu_add(irq_count, 1);
asm_call_irq_on_stack(tos - 8, func, desc);
__this_cpu_sub(irq_count, 1); __this_cpu_sub(irq_count, 1);
} }
#else /* CONFIG_X86_64 */ #else /* CONFIG_X86_64 */
static inline bool irqstack_active(void) { return false; } static inline bool irqstack_active(void) { return false; }
static inline void __run_on_irqstack(void *func, void *arg) { } static inline void __run_on_irqstack(void (*func)(void)) { }
static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
struct pt_regs *regs) { }
static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
struct irq_desc *desc) { }
#endif /* !CONFIG_X86_64 */ #endif /* !CONFIG_X86_64 */
static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs) static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
...@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs) ...@@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
return !user_mode(regs) && !irqstack_active(); return !user_mode(regs) && !irqstack_active();
} }
static __always_inline void run_on_irqstack_cond(void *func, void *arg,
static __always_inline void run_on_irqstack_cond(void (*func)(void),
struct pt_regs *regs) struct pt_regs *regs)
{ {
void (*__func)(void *arg) = func; lockdep_assert_irqs_disabled();
if (irq_needs_irq_stack(regs))
__run_on_irqstack(func);
else
func();
}
static __always_inline void
run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs),
struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
if (irq_needs_irq_stack(regs))
__run_sysvec_on_irqstack(func, regs);
else
func(regs);
}
static __always_inline void
run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc,
struct pt_regs *regs)
{
lockdep_assert_irqs_disabled(); lockdep_assert_irqs_disabled();
if (irq_needs_irq_stack(regs)) if (irq_needs_irq_stack(regs))
__run_on_irqstack(__func, arg); __run_irq_on_irqstack(func, desc);
else else
__func(arg); func(desc);
} }
#endif #endif
...@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void) ...@@ -2243,6 +2243,7 @@ static inline void __init check_timer(void)
legacy_pic->init(0); legacy_pic->init(0);
legacy_pic->make_irq(0); legacy_pic->make_irq(0);
apic_write(APIC_LVT0, APIC_DM_EXTINT); apic_write(APIC_LVT0, APIC_DM_EXTINT);
legacy_pic->unmask(0);
unlock_ExtINT_logic(); unlock_ExtINT_logic();
......
...@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc, ...@@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc,
struct pt_regs *regs) struct pt_regs *regs)
{ {
if (IS_ENABLED(CONFIG_X86_64)) if (IS_ENABLED(CONFIG_X86_64))
run_on_irqstack_cond(desc->handle_irq, desc, regs); run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
else else
__handle_irq(desc, regs); __handle_irq(desc, regs);
} }
......
...@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu) ...@@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
void do_softirq_own_stack(void) void do_softirq_own_stack(void)
{ {
run_on_irqstack_cond(__do_softirq, NULL, NULL); run_on_irqstack_cond(__do_softirq, NULL);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment