Commit a7ba38d6 authored by Catalin Marinas

Merge branch 'for-next/kernel-mode-neon' into for-next/core

* for-next/kernel-mode-neon:
  arm64: neon/efi: Make EFI fpsimd save/restore variables static
  arm64: neon: Forbid when irqs are disabled
  arm64: neon: Export kernel_neon_busy to loadable modules
  arm64: neon: Temporarily add a kernel_mode_begin_partial() definition
  arm64: neon: Remove support for nested or hardirq kernel-mode NEON
  arm64: neon: Allow EFI runtime services to use FPSIMD in irq context
  arm64: fpsimd: Consistently use __this_cpu_ ops where appropriate
  arm64: neon: Add missing header guard in <asm/neon.h>
  arm64: neon: replace generic definition of may_use_simd()
parents cda94408 3b66023d
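
In practical terms, kernel-mode NEON can no longer be used from hardirq context, with IRQs disabled, or nested, and the partial register save/restore API is gone. A typical caller after this series looks something like the sketch below (illustrative only: do_neon_work() and do_scalar_work() are hypothetical stand-ins for an accelerated routine and its scalar fallback):

	#include <asm/neon.h>
	#include <asm/simd.h>

	static void do_work(u8 *buf, size_t len)
	{
		if (may_use_simd()) {
			kernel_neon_begin();	/* claim the FPSIMD regs; preemption is disabled */
			do_neon_work(buf, len);	/* NEON/FPSIMD may be used freely here */
			kernel_neon_end();	/* give the regs back to the task */
		} else {
			do_scalar_work(buf, len);	/* e.g. hardirq/NMI, or IRQs disabled */
		}
	}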
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
 generic-y += set_memory.h
-generic-y += simd.h
 generic-y += sizes.h
 generic-y += switch_to.h
 generic-y += trace_clock.h
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -3,6 +3,7 @@

 #include <asm/boot.h>
 #include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
 #include <asm/io.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -21,8 +22,8 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

 #define arch_efi_call_virt_setup()					\
 ({									\
-	kernel_neon_begin();						\
 	efi_virtmap_load();						\
+	__efi_fpsimd_begin();						\
 })

 #define arch_efi_call_virt(p, f, args...)				\
@@ -34,8 +35,8 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

 #define arch_efi_call_virt_teardown()					\
 ({									\
+	__efi_fpsimd_end();						\
 	efi_virtmap_unload();						\
-	kernel_neon_end();						\
 })

 #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
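
For reference, the generic EFI code invokes these macros around every runtime service call, roughly as in the sketch below (simplified from the efi_call_virt_pointer() wrapper in drivers/firmware/efi/runtime-wrappers.c; locking omitted). The reordering means FPSIMD state is now saved after the EFI page tables are installed and restored before they are torn down:

	arch_efi_call_virt_setup();	/* efi_virtmap_load(), then __efi_fpsimd_begin() */
	status = arch_efi_call_virt(p, get_time, tm, tc);
	arch_efi_call_virt_teardown();	/* __efi_fpsimd_end(), then efi_virtmap_unload() */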
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -41,16 +41,6 @@ struct fpsimd_state {
 	unsigned int cpu;
 };

-/*
- * Struct for stacking the bottom 'n' FP/SIMD registers.
- */
-struct fpsimd_partial_state {
-	u32		fpsr;
-	u32		fpcr;
-	u32		num_regs;
-	__uint128_t	vregs[32];
-};
-
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
@@ -77,9 +67,9 @@ extern void fpsimd_update_current_state(struct fpsimd_state *state);
 extern void fpsimd_flush_task_state(struct task_struct *target);

-extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
-				      u32 num_regs);
-extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+/* For use by EFI runtime services calls only */
+extern void __efi_fpsimd_begin(void);
+extern void __efi_fpsimd_end(void);

 #endif
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -75,59 +75,3 @@
 	ldr	w\tmpnr, [\state, #16 * 2 + 4]
 	fpsimd_restore_fpcr x\tmpnr, \state
 .endm
-
-.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
-	mrs	x\tmpnr1, fpsr
-	str	w\numnr, [\state, #8]
-	mrs	x\tmpnr2, fpcr
-	stp	w\tmpnr1, w\tmpnr2, [\state]
-	adr	x\tmpnr1, 0f
-	add	\state, \state, x\numnr, lsl #4
-	sub	x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
-	br	x\tmpnr1
-	stp	q30, q31, [\state, #-16 * 30 - 16]
-	stp	q28, q29, [\state, #-16 * 28 - 16]
-	stp	q26, q27, [\state, #-16 * 26 - 16]
-	stp	q24, q25, [\state, #-16 * 24 - 16]
-	stp	q22, q23, [\state, #-16 * 22 - 16]
-	stp	q20, q21, [\state, #-16 * 20 - 16]
-	stp	q18, q19, [\state, #-16 * 18 - 16]
-	stp	q16, q17, [\state, #-16 * 16 - 16]
-	stp	q14, q15, [\state, #-16 * 14 - 16]
-	stp	q12, q13, [\state, #-16 * 12 - 16]
-	stp	q10, q11, [\state, #-16 * 10 - 16]
-	stp	q8, q9, [\state, #-16 * 8 - 16]
-	stp	q6, q7, [\state, #-16 * 6 - 16]
-	stp	q4, q5, [\state, #-16 * 4 - 16]
-	stp	q2, q3, [\state, #-16 * 2 - 16]
-	stp	q0, q1, [\state, #-16 * 0 - 16]
-0:
-.endm
-
-.macro fpsimd_restore_partial state, tmpnr1, tmpnr2
-	ldp	w\tmpnr1, w\tmpnr2, [\state]
-	msr	fpsr, x\tmpnr1
-	fpsimd_restore_fpcr x\tmpnr2, x\tmpnr1
-	adr	x\tmpnr1, 0f
-	ldr	w\tmpnr2, [\state, #8]
-	add	\state, \state, x\tmpnr2, lsl #4
-	sub	x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
-	br	x\tmpnr1
-	ldp	q30, q31, [\state, #-16 * 30 - 16]
-	ldp	q28, q29, [\state, #-16 * 28 - 16]
-	ldp	q26, q27, [\state, #-16 * 26 - 16]
-	ldp	q24, q25, [\state, #-16 * 24 - 16]
-	ldp	q22, q23, [\state, #-16 * 22 - 16]
-	ldp	q20, q21, [\state, #-16 * 20 - 16]
-	ldp	q18, q19, [\state, #-16 * 18 - 16]
-	ldp	q16, q17, [\state, #-16 * 16 - 16]
-	ldp	q14, q15, [\state, #-16 * 14 - 16]
-	ldp	q12, q13, [\state, #-16 * 12 - 16]
-	ldp	q10, q11, [\state, #-16 * 10 - 16]
-	ldp	q8, q9, [\state, #-16 * 8 - 16]
-	ldp	q6, q7, [\state, #-16 * 6 - 16]
-	ldp	q4, q5, [\state, #-16 * 4 - 16]
-	ldp	q2, q3, [\state, #-16 * 2 - 16]
-	ldp	q0, q1, [\state, #-16 * 0 - 16]
-0:
-.endm
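
The adr/add/sub/br sequence in the deleted macros implemented a computed branch into the stp/ldp ladder: each stp is a 4-byte instruction storing one register pair, so branching to 0f - num_regs * 2 (num_regs is rounded up to even by the caller, hence the lsl #1) skips the leading stores and executes exactly the final num_regs / 2 of them, i.e. the stores for q0..q(num_regs - 1). A rough C analogue of that selection logic, purely for illustration (store_qreg_pair() is hypothetical):

	static void save_bottom_n(unsigned int num_regs)	/* pre-rounded to even */
	{
		/* Run only the last num_regs / 2 rungs of the ladder. */
		for (unsigned int pair = num_regs / 2; pair-- > 0; )
			store_qreg_pair(2 * pair);	/* one stp q(2*pair), q(2*pair + 1) */
	}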
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -8,12 +8,22 @@
  * published by the Free Software Foundation.
  */

+#ifndef __ASM_NEON_H
+#define __ASM_NEON_H
+
 #include <linux/types.h>
 #include <asm/fpsimd.h>

 #define cpu_has_neon()		system_supports_fpsimd()

-#define kernel_neon_begin()	kernel_neon_begin_partial(32)
-
-void kernel_neon_begin_partial(u32 num_regs);
+void kernel_neon_begin(void);
 void kernel_neon_end(void);
+
+/*
+ * Temporary macro to allow the crypto code to compile. Note that the
+ * semantics of kernel_neon_begin_partial() are now different from the
+ * original as it does not allow being called in an interrupt context.
+ */
+#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
+
+#endif /* ! __ASM_NEON_H */
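
Until the remaining crypto callers are converted, old-style code keeps compiling but now always claims the full register file and must not run in interrupt context; roughly:

	kernel_neon_begin_partial(4);	/* now expands to kernel_neon_begin() */
	/* ... NEON code ... */
	kernel_neon_end();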
--- /dev/null
+++ b/arch/arm64/include/asm/simd.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SIMD_H
+#define __ASM_SIMD_H
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+DECLARE_PER_CPU(bool, kernel_neon_busy);
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ *                instructions or access the SIMD register file
+ *
+ * Callers must not assume that the result remains true beyond the next
+ * preempt_enable() or return from softirq context.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+	/*
+	 * The raw_cpu_read() is racy if called with preemption enabled.
+	 * This is not a bug: kernel_neon_busy is only set when
+	 * preemption is disabled, so we cannot migrate to another CPU
+	 * while it is set, nor can we migrate to a CPU where it is set.
+	 * So, if we find it clear on some CPU then we're guaranteed to
+	 * find it clear on any CPU we could migrate to.
+	 *
+	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
+	 * the flag will be set, but preemption is also disabled, so we
+	 * can't migrate to another CPU and spuriously see it become
+	 * false.
+	 */
+	return !in_irq() && !irqs_disabled() && !in_nmi() &&
+	       !raw_cpu_read(kernel_neon_busy);
+}
+
+#else /* ! CONFIG_KERNEL_MODE_NEON */
+
+static __must_check inline bool may_use_simd(void)
+{
+	return false;
+}
+
+#endif /* ! CONFIG_KERNEL_MODE_NEON */
+
+#endif
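
Two consequences of these checks, written as hypothetical assertions (illustrative only, not kernel code):

	local_irq_disable();
	WARN_ON(may_use_simd());	/* false: irqs_disabled() */
	local_irq_enable();

	kernel_neon_begin();
	WARN_ON(may_use_simd());	/* false: kernel_neon_busy is set on this CPU */
	kernel_neon_end();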
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,27 +41,3 @@ ENTRY(fpsimd_load_state)
 	fpsimd_restore x0, 8
 	ret
 ENDPROC(fpsimd_load_state)
-
-#ifdef CONFIG_KERNEL_MODE_NEON
-
-/*
- * Save the bottom n FP registers.
- *
- * x0 - pointer to struct fpsimd_partial_state
- */
-ENTRY(fpsimd_save_partial_state)
-	fpsimd_save_partial x0, 1, 8, 9
-	ret
-ENDPROC(fpsimd_save_partial_state)
-
-/*
- * Load the bottom n FP registers.
- *
- * x0 - pointer to struct fpsimd_partial_state
- */
-ENTRY(fpsimd_load_partial_state)
-	fpsimd_restore_partial x0, 8, 9
-	ret
-ENDPROC(fpsimd_load_partial_state)
-
-#endif
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,16 +17,19 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

+#include <linux/bottom_half.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
 #include <linux/sched/signal.h>
 #include <linux/signal.h>
-#include <linux/hardirq.h>

 #include <asm/fpsimd.h>
 #include <asm/cputype.h>
+#include <asm/simd.h>

 #define FPEXC_IOF	(1 << 0)
 #define FPEXC_DZF	(1 << 1)
@@ -62,6 +65,13 @@
  * CPU currently contain the most recent userland FPSIMD state of the current
  * task.
  *
+ * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
+ * save the task's FPSIMD context back to task_struct from softirq context.
+ * To prevent this from racing with the manipulation of the task's FPSIMD state
+ * from task context and thereby corrupting the state, it is necessary to
+ * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
+ * flag with local_bh_disable() unless softirqs are already masked.
+ *
  * For a certain task, the sequence may look something like this:
  * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
  *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
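
One possible interleaving of the race this new paragraph describes, sketched for illustration:

	/*
	 * task context                        softirq (same CPU), softirqs unmasked
	 * ------------                        ------------------------------------
	 * fpsimd_restore_current_state():
	 *   clears TIF_FOREIGN_FPSTATE
	 *                                     kernel_neon_begin():
	 *                                       sees the flag clear, so it saves
	 *                                       the current (stale) registers over
	 *                                       the task's good fpsimd_state
	 * fpsimd_load_state() then reloads
	 * the now-corrupted saved state
	 */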
@@ -161,9 +171,14 @@ void fpsimd_flush_thread(void)
 {
 	if (!system_supports_fpsimd())
 		return;
+
+	local_bh_disable();
+
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+
+	local_bh_enable();
 }

 /*
@@ -174,10 +189,13 @@ void fpsimd_preserve_current_state(void)
 {
 	if (!system_supports_fpsimd())
 		return;
-	preempt_disable();
+
+	local_bh_disable();
+
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
 		fpsimd_save_state(&current->thread.fpsimd_state);
-	preempt_enable();
+
+	local_bh_enable();
 }

 /*
@@ -189,15 +207,18 @@ void fpsimd_restore_current_state(void)
 {
 	if (!system_supports_fpsimd())
 		return;
-	preempt_disable();
+
+	local_bh_disable();
+
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		struct fpsimd_state *st = &current->thread.fpsimd_state;

 		fpsimd_load_state(st);
-		this_cpu_write(fpsimd_last_state, st);
+		__this_cpu_write(fpsimd_last_state, st);
 		st->cpu = smp_processor_id();
 	}
-	preempt_enable();
+
+	local_bh_enable();
 }

 /*
@@ -209,15 +230,18 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 {
 	if (!system_supports_fpsimd())
 		return;
-	preempt_disable();
+
+	local_bh_disable();
+
 	fpsimd_load_state(state);
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		struct fpsimd_state *st = &current->thread.fpsimd_state;

-		this_cpu_write(fpsimd_last_state, st);
+		__this_cpu_write(fpsimd_last_state, st);
 		st->cpu = smp_processor_id();
 	}
-	preempt_enable();
+
+	local_bh_enable();
 }

 /*
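
The this_cpu_write() -> __this_cpu_write() conversions above are valid because local_bh_disable() already makes these sections non-preemptible, so the implicit preemption protection of the this_cpu_*() forms is redundant. Conceptually (a sketch, not the actual generic implementation):

	/* this_cpu_write() protects itself against preemption, roughly: */
	preempt_disable();
	raw_cpu_write(fpsimd_last_state, st);
	preempt_enable();

	/* __this_cpu_write() skips the guard; with CONFIG_DEBUG_PREEMPT it
	 * merely asserts that the caller is already non-preemptible. */
	__this_cpu_write(fpsimd_last_state, st);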
@@ -230,52 +254,122 @@ void fpsimd_flush_task_state(struct task_struct *t)

 #ifdef CONFIG_KERNEL_MODE_NEON

-static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
-static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+DEFINE_PER_CPU(bool, kernel_neon_busy);
+EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

 /*
  * Kernel-side NEON support functions
  */
-void kernel_neon_begin_partial(u32 num_regs)
+
+/*
+ * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the FPSIMD registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_neon_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the FPSIMD registers until kernel_neon_end() is
+ * called.
+ */
+void kernel_neon_begin(void)
 {
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
-	if (in_interrupt()) {
-		struct fpsimd_partial_state *s = this_cpu_ptr(
-			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);

-		BUG_ON(num_regs > 32);
-		fpsimd_save_partial_state(s, roundup(num_regs, 2));
-	} else {
-		/*
-		 * Save the userland FPSIMD state if we have one and if we
-		 * haven't done so already. Clear fpsimd_last_state to indicate
-		 * that there is no longer userland FPSIMD state in the
-		 * registers.
-		 */
-		preempt_disable();
-		if (current->mm &&
-		    !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
-			fpsimd_save_state(&current->thread.fpsimd_state);
-		this_cpu_write(fpsimd_last_state, NULL);
-	}
+	BUG_ON(!may_use_simd());
+
+	local_bh_disable();
+
+	__this_cpu_write(kernel_neon_busy, true);
+
+	/* Save unsaved task fpsimd state, if any: */
+	if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+		fpsimd_save_state(&current->thread.fpsimd_state);
+
+	/* Invalidate any task state remaining in the fpsimd regs: */
+	__this_cpu_write(fpsimd_last_state, NULL);
+
+	preempt_disable();
+
+	local_bh_enable();
 }
-EXPORT_SYMBOL(kernel_neon_begin_partial);
+EXPORT_SYMBOL(kernel_neon_begin);
+/*
+ * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
+ *
+ * Must be called from a context in which kernel_neon_begin() was previously
+ * called, with no call to kernel_neon_end() in the meantime.
+ *
+ * The caller must not use the FPSIMD registers after this function is called,
+ * unless kernel_neon_begin() is called again in the meantime.
+ */
 void kernel_neon_end(void)
 {
+	bool busy;
+
 	if (!system_supports_fpsimd())
 		return;
-	if (in_interrupt()) {
-		struct fpsimd_partial_state *s = this_cpu_ptr(
-			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
-		fpsimd_load_partial_state(s);
-	} else {
-		preempt_enable();
-	}
+
+	busy = __this_cpu_xchg(kernel_neon_busy, false);
+	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */
+
+	preempt_enable();
 }
 EXPORT_SYMBOL(kernel_neon_end);
+static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
+static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
+
+/*
+ * EFI runtime services support functions
+ *
+ * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
+ * This means that for EFI (and only for EFI), we have to assume that FPSIMD
+ * is always used rather than being an optional accelerator.
+ *
+ * These functions provide the necessary support for ensuring FPSIMD
+ * save/restore in the contexts from which EFI is used.
+ *
+ * Do not use them for any other purpose -- if tempted to do so, you are
+ * either doing something wrong or you need to propose some refactoring.
+ */
+
+/*
+ * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
+ */
+void __efi_fpsimd_begin(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+
+	WARN_ON(preemptible());
+
+	if (may_use_simd())
+		kernel_neon_begin();
+	else {
+		fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+		__this_cpu_write(efi_fpsimd_state_used, true);
+	}
+}
+
+/*
+ * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
+ */
+void __efi_fpsimd_end(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+
+	if (__this_cpu_xchg(efi_fpsimd_state_used, false))
+		fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+	else
+		kernel_neon_end();
+}
+
 #endif /* CONFIG_KERNEL_MODE_NEON */

 #ifdef CONFIG_CPU_PM
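
Two properties of the final code are worth spelling out: kernel_neon_begin()/kernel_neon_end() sections no longer nest, and __efi_fpsimd_begin()/__efi_fpsimd_end() pick a save mechanism based on context and undo whichever one was used. An illustrative summary (comments only, not kernel code):

	/*
	 * Pairing rules:
	 *	kernel_neon_begin();	// kernel_neon_busy = true, preemption off
	 *	kernel_neon_begin();	// would BUG: may_use_simd() is now false
	 *	kernel_neon_end();	// kernel_neon_busy = false, preemption back on
	 *	kernel_neon_end();	// would WARN: no matching kernel_neon_begin()
	 *
	 * EFI path selection:
	 *	task context, softirqs unmasked: kernel_neon_begin(),
	 *	    efi_fpsimd_state_used stays false
	 *	irq context or irqs disabled:    save into efi_fpsimd_state,
	 *	    efi_fpsimd_state_used = true
	 * __efi_fpsimd_end() restores via the matching path, chosen by
	 * __this_cpu_xchg(efi_fpsimd_state_used, false).
	 */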