Commit 7e92e01b authored by Rohan McLure, committed by Michael Ellerman

powerpc: Provide syscall wrapper

Implement syscall wrapper as per s390, x86, arm64. When enabled, it
causes handlers to accept parameters from a stack frame rather than
from user scratch register state. This allows user registers to be
safely cleared in order to reduce caller influence on speculation
within the syscall routine. The wrapper is a macro that emits syscall
handler symbols that call into the target handler, obtaining its
parameters from a struct pt_regs on the stack.
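As a rough user-space illustration of that convention (the macro, struct
layout and names below are simplified stand-ins invented for this sketch,
not the kernel implementation), a DEFINE-style macro can emit a handler
symbol that takes a pt_regs-like block and unpacks its arguments from
gpr[3..8]:

#include <stdio.h>

struct pt_regs { unsigned long gpr[32]; };

/* Emit __wrapped_<name>(const struct pt_regs *) plus the typed body __do_<name>(). */
#define WRAPPED_SYSCALL2(name, t1, a1, t2, a2)                           \
        static long __do_##name(t1 a1, t2 a2);                           \
        long __wrapped_##name(const struct pt_regs *regs)                \
        {                                                                \
                /* pull the arguments out of the saved register area */  \
                return __do_##name((t1)regs->gpr[3], (t2)regs->gpr[4]);  \
        }                                                                \
        static long __do_##name(t1 a1, t2 a2)

WRAPPED_SYSCALL2(demo_add, long, x, long, y)
{
        return x + y;
}

int main(void)
{
        struct pt_regs regs = { .gpr = { [3] = 40, [4] = 2 } };

        printf("%ld\n", __wrapped_demo_add(&regs));     /* prints 42 */
        return 0;
}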

As registers are already saved to the stack prior to calling
system_call_exception, it appears that this function executes more
efficiently with the new stack-pointer convention than with parameters
passed by registers, avoiding the allocation of a stack frame for this
method. On a 32-bit system, we see a >20% performance increase on the
null_syscall microbenchmark, and on Power 8 the performance gains
amortise the cost of clearing and restoring registers implemented at
the end of this series, for a final result of ~5.6% performance
improvement on null_syscall.
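For context, a null_syscall-style figure comes from timing a tight loop of
a near-trivial syscall and dividing by the iteration count. A rough
user-space sketch of that kind of measurement (illustrative only, not the
benchmark behind the numbers above) is:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        const long iters = 10 * 1000 * 1000;
        struct timespec start, end;
        long i;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (i = 0; i < iters; i++)
                getppid();              /* near-trivial syscall */
        clock_gettime(CLOCK_MONOTONIC, &end);

        double ns = (end.tv_sec - start.tv_sec) * 1e9 +
                    (end.tv_nsec - start.tv_nsec);
        printf("%.1f ns per syscall\n", ns / iters);
        return 0;
}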

Syscalls are wrapped in this fashion on all platforms except for the
Cell processor, as this commit does not provide SPU support. This can
be fixed in a subsequent patch, but requires spu_sys_callback to
allocate a pt_regs structure to satisfy the wrapped calling convention.
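(For illustration only: a stand-in for what such a caller would have to
do — build a pt_regs argument block, fill gpr[3..8] with the arguments,
and invoke the regs-based handler. Types and names here are hypothetical,
not the real spu_sys_callback code.)

#include <stdio.h>

struct pt_regs { unsigned long gpr[32]; };

/* A handler following the wrapped convention reads args from gpr[3..8]. */
static long sys_demo(const struct pt_regs *regs)
{
        return (long)(regs->gpr[3] + regs->gpr[4]);
}

static long call_wrapped(long (*handler)(const struct pt_regs *),
                         unsigned long a0, unsigned long a1)
{
        struct pt_regs regs = { .gpr = { 0 } };  /* argument block */

        regs.gpr[3] = a0;
        regs.gpr[4] = a1;
        return handler(&regs);
}

int main(void)
{
        printf("%ld\n", call_wrapped(sys_demo, 2, 3));  /* prints 5 */
        return 0;
}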
Co-developed-by: Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Make incompatible with COMPAT to retain clearing of high bits of args]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220921065605.1051927-22-rmclure@linux.ibm.com
parent f8971c62
...@@ -137,6 +137,7 @@ config PPC
select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION
select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE
select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_SYSCALL_WRAPPER if !SPU_BASE && !COMPAT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
...
...@@ -14,8 +14,12 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
typedef long (*syscall_fn)(const struct pt_regs *);
#else
typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long);
#endif
/* ftrace syscalls requires exporting the sys_call_table */
extern const syscall_fn sys_call_table[];
...
/* SPDX-License-Identifier: GPL-2.0 */
/*
* syscall_wrapper.h - powerpc specific wrappers to syscall definitions
*
* Based on arch/{x86,arm64}/include/asm/syscall_wrapper.h
*/
#ifndef __ASM_POWERPC_SYSCALL_WRAPPER_H
#define __ASM_POWERPC_SYSCALL_WRAPPER_H
struct pt_regs;
#define SC_POWERPC_REGS_TO_ARGS(x, ...) \
__MAP(x,__SC_ARGS \
,,regs->gpr[3],,regs->gpr[4],,regs->gpr[5] \
,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8])
#define __SYSCALL_DEFINEx(x, name, ...) \
long __powerpc_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__powerpc_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
long __powerpc_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \
} \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
__MAP(x,__SC_TEST,__VA_ARGS__); \
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
long __powerpc_sys_##sname(const struct pt_regs *__unused); \
ALLOW_ERROR_INJECTION(__powerpc_sys_##sname, ERRNO); \
long __powerpc_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL(name) \
long __powerpc_sys_##name(const struct pt_regs *regs); \
long __weak __powerpc_sys_##name(const struct pt_regs *regs) \
{ \
return sys_ni_syscall(); \
}
#define SYS_NI(name) SYSCALL_ALIAS(__powerpc_sys_##name, sys_ni_posix_timers);
#endif // __ASM_POWERPC_SYSCALL_WRAPPER_H
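For reference, a hand expansion of roughly what the macros above emit for a
hypothetical two-argument syscall, SYSCALL_DEFINE2(demo, unsigned int, fd,
unsigned long, len), with SYSCALL_METADATA and ALLOW_ERROR_INJECTION elided
and a stand-in pt_regs layout so the sketch is self-contained:

struct pt_regs { unsigned long gpr[32]; };      /* stand-in layout */

static inline long __do_sys_demo(unsigned int fd, unsigned long len);
static long __se_sys_demo(long fd, long len);

/* The symbol the syscall table points at: takes only the saved regs. */
long __powerpc_sys_demo(const struct pt_regs *regs)
{
        return __se_sys_demo(regs->gpr[3], regs->gpr[4]);
}

/* Casts the raw register values to the declared argument types. */
static long __se_sys_demo(long fd, long len)
{
        return __do_sys_demo((unsigned int)fd, (unsigned long)len);
}

/* The body written by the syscall's author. */
static inline long __do_sys_demo(unsigned int fd, unsigned long len)
{
        return (long)(fd + len);
}

int main(void)
{
        struct pt_regs regs = { .gpr = { [3] = 1, [4] = 2 } };

        return (int)__powerpc_sys_demo(&regs);  /* exits with 3 */
}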
...@@ -15,6 +15,12 @@
#include <asm/unistd.h>
#include <asm/ucontext.h>
#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
long sys_ni_syscall(void);
#else
long sys_ni_syscall(const struct pt_regs *regs);
#endif
struct rtas_args;
/*
...@@ -29,12 +35,12 @@ struct rtas_args;
#define merge_64(high, low) (((u64)high << 32) | low)
#endif
long sys_ni_syscall(void);
/*
* PowerPC architecture-specific syscalls
*/
#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
long sys_rtas(struct rtas_args __user *uargs);
#ifdef CONFIG_PPC64
...@@ -114,5 +120,19 @@ long sys_ppc_fadvise64_64(int fd, int advice,
u32 len_high, u32 len_low);
#endif
#else
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define __SYSCALL(nr, entry) \
long __powerpc_##entry(const struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/syscall_table_64.h>
#else
#include <asm/syscall_table_32.h>
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_SYSCALLS_H */
...@@ -15,7 +15,6 @@
/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
{
unsigned long r3, r4, r5, r6, r7, r8;
long ret;
syscall_fn f;
...@@ -145,31 +144,34 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
return -ENOSYS;
}
r3 = regs->gpr[3];
r4 = regs->gpr[4];
r5 = regs->gpr[5];
r6 = regs->gpr[6];
r7 = regs->gpr[7];
r8 = regs->gpr[8];
/* May be faster to do array_index_nospec? */
barrier_nospec();
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
// No COMPAT if we have SYSCALL_WRAPPER, see Kconfig
f = (void *)sys_call_table[r0];
ret = f(regs);
#else
if (unlikely(is_compat_task())) {
unsigned long r3, r4, r5, r6, r7, r8;
f = (void *)compat_sys_call_table[r0];
r3 &= 0x00000000ffffffffULL;
r4 &= 0x00000000ffffffffULL;
r5 &= 0x00000000ffffffffULL;
r6 &= 0x00000000ffffffffULL;
r7 &= 0x00000000ffffffffULL;
r8 &= 0x00000000ffffffffULL;
r3 = regs->gpr[3] & 0x00000000ffffffffULL;
r4 = regs->gpr[4] & 0x00000000ffffffffULL;
r5 = regs->gpr[5] & 0x00000000ffffffffULL;
r6 = regs->gpr[6] & 0x00000000ffffffffULL;
r7 = regs->gpr[7] & 0x00000000ffffffffULL;
r8 = regs->gpr[8] & 0x00000000ffffffffULL;
ret = f(r3, r4, r5, r6, r7, r8);
} else {
f = (void *)sys_call_table[r0];
}
ret = f(r3, r4, r5, r6, r7, r8);
ret = f(regs->gpr[3], regs->gpr[4], regs->gpr[5],
regs->gpr[6], regs->gpr[7], regs->gpr[8]);
}
#endif
/*
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
...
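The wrapped dispatch above reduces to a single indirect call that is handed
the register save area. A self-contained sketch of that shape (stand-in
types and handlers, not the kernel code) is:

#include <stdio.h>

struct pt_regs { unsigned long gpr[32]; };
typedef long (*syscall_fn)(const struct pt_regs *);

static long demo_handler(const struct pt_regs *regs)
{
        return (long)regs->gpr[5];      /* e.g. return the third argument */
}

static const syscall_fn table[] = { demo_handler };

static long dispatch(unsigned long nr, struct pt_regs *regs)
{
        if (nr >= sizeof(table) / sizeof(table[0]))
                return -38;             /* ENOSYS */
        /* No GPR values are reloaded into argument registers here;
         * the handler reads them from the saved pt_regs itself. */
        return table[nr](regs);
}

int main(void)
{
        struct pt_regs regs = { .gpr = { [3] = 1, [4] = 0, [5] = 42 } };

        printf("%ld\n", dispatch(0, &regs));    /* prints 42 */
        return 0;
}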
...@@ -15,13 +15,20 @@
#include <asm/unistd.h>
#include <asm/syscalls.h>
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
#undef __SYSCALL
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
#define __SYSCALL(nr, entry) [nr] = __powerpc_##entry,
#define __powerpc_sys_ni_syscall sys_ni_syscall
#else
/*
* Coerce syscall handlers with arbitrary parameters to common type
* requires cast to void* to avoid -Wcast-function-type.
*/
#define __SYSCALL(nr, entry) [nr] = (void *) entry,
#endif
const syscall_fn sys_call_table[] = {
#ifdef CONFIG_PPC64
...
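The table is built with the X-macro pattern: the generated syscall list is
expanded once with __SYSCALL emitting prototypes (in asm/syscalls.h above)
and once emitting table entries (here), and the extra __powerpc_sys_ni_syscall
define maps the prefixed name back to the common sys_ni_syscall stub. A
self-contained sketch of the pattern (hypothetical entries, with a local
macro standing in for the generated syscall_table_*.h headers) is:

struct pt_regs { unsigned long gpr[32]; };
typedef long (*syscall_fn)(const struct pt_regs *);

/* Stand-in for the generated list normally included from a header. */
#define DEMO_SYSCALLS                   \
        __SYSCALL(0, sys_demo_restart)  \
        __SYSCALL(1, sys_demo_exit)

/* First expansion: prototypes with the __powerpc_ prefix. */
#define __SYSCALL(nr, entry) long __powerpc_##entry(const struct pt_regs *regs);
DEMO_SYSCALLS
#undef __SYSCALL

/* Second expansion: table entries with the same prefix. */
#define __SYSCALL(nr, entry) [nr] = __powerpc_##entry,
static const syscall_fn demo_call_table[] = {
        DEMO_SYSCALLS
};
#undef __SYSCALL

long __powerpc_sys_demo_restart(const struct pt_regs *regs)
{
        (void)regs;
        return 0;
}

long __powerpc_sys_demo_exit(const struct pt_regs *regs)
{
        return (long)regs->gpr[3];
}

int main(void)
{
        struct pt_regs regs = { .gpr = { [3] = 7 } };

        return (int)demo_call_table[1](&regs); /* exits with 7 */
}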
...@@ -39,6 +39,8 @@
extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
long sys_ni_syscall(void);
/*
* The vdso data page (aka. systemcfg for old ppc64 fans) is here.
* Once the early boot kernel code no longer needs to muck around
...