Commit 4efd417f authored by Vasily Gorbik

s390: raise minimum supported machine generation to z10

Machine generations up to z9 (released in May 2006) have been officially
out of service for several years now (z9 end of service: January 31, 2019).
No distribution builds kernels supporting those old machine generations
anymore, except Debian, which seems to pick the oldest supported
generation. The team supporting Debian on s390 has been notified about
the change.

Raising the minimum supported machine generation to z10 helps to reduce
maintenance cost and effectively removes code which is not getting
enough testing coverage due to the lack of older hardware and
distribution support. Besides that, this unblocks some optimization
opportunities and allows the use of a wider instruction set in asm files
for future feature implementations. Due to this change, the spectre
mitigation and usercopy implementations can be drastically simplified,
and many newer instructions can be converted from ".insn" encodings to
instruction names.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 432b1cc7
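As an illustration (an editorial sketch, not part of the patch): with z10 as
the baseline, instructions that previously had to be emitted as raw ".insn"
encodings for the benefit of older assembler -march levels can be written by
mnemonic. The execute-relative-long instruction used by the expoline code
below is one example:

	.insn	ril,0xc60000000000,0,.+10	# raw encoding of exrl 0,.+10
	exrl	0,.+10				# same instruction by name, valid once -march=z10 is the minimum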
@@ -120,7 +120,6 @@ config S390
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
select GENERIC_ALLOCATOR
@@ -230,20 +229,8 @@ source "kernel/livepatch/Kconfig"
menu "Processor type and features"
config HAVE_MARCH_Z900_FEATURES
def_bool n
config HAVE_MARCH_Z990_FEATURES
def_bool n
select HAVE_MARCH_Z900_FEATURES
config HAVE_MARCH_Z9_109_FEATURES
def_bool n
select HAVE_MARCH_Z990_FEATURES
config HAVE_MARCH_Z10_FEATURES
def_bool n
select HAVE_MARCH_Z9_109_FEATURES
config HAVE_MARCH_Z196_FEATURES
def_bool n
@@ -269,41 +256,13 @@ choice
prompt "Processor type"
default MARCH_Z196
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
select HAVE_MARCH_Z900_FEATURES
depends on $(cc-option,-march=z900)
help
Select this to enable optimizations for model z800/z900 (2064 and
2066 series). This will enable some optimizations that are not
available on older ESA/390 (31 Bit) only CPUs.
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
select HAVE_MARCH_Z990_FEATURES
depends on $(cc-option,-march=z990)
help
Select this to enable optimizations for model z890/z990 (2084 and
2086 series). The kernel will be slightly faster but will not work
on older machines.
config MARCH_Z9_109
bool "IBM System z9"
select HAVE_MARCH_Z9_109_FEATURES
depends on $(cc-option,-march=z9-109)
help
Select this to enable optimizations for IBM System z9 (2094 and
2096 series). The kernel will be slightly faster but will not work
on older machines.
config MARCH_Z10
bool "IBM System z10"
select HAVE_MARCH_Z10_FEATURES
depends on $(cc-option,-march=z10)
help
Select this to enable optimizations for IBM System z10 (2097 and
2098 series). The kernel will be slightly faster but will not work
on older machines.
Select this to enable optimizations for IBM System z10 (2097 and 2098
series). This is the oldest machine generation currently supported.
config MARCH_Z196
bool "IBM zEnterprise 114 and 196"
@@ -352,15 +311,6 @@ config MARCH_Z15
endchoice
config MARCH_Z900_TUNE
def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT
config MARCH_Z990_TUNE
def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT
config MARCH_Z9_109_TUNE
def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT
config MARCH_Z10_TUNE
def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT
@@ -396,21 +346,8 @@ config TUNE_DEFAULT
Tune the generated code for the target processor for which the kernel
will be compiled.
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
depends on $(cc-option,-mtune=z900)
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
depends on $(cc-option,-mtune=z990)
config TUNE_Z9_109
bool "IBM System z9"
depends on $(cc-option,-mtune=z9-109)
config TUNE_Z10
bool "IBM System z10"
depends on $(cc-option,-mtune=z10)
config TUNE_Z196
bool "IBM zEnterprise 114 and 196"
@@ -599,7 +536,6 @@ config EXPOLINE
config EXPOLINE_EXTERN
def_bool n
depends on EXPOLINE
depends on HAVE_MARCH_Z10_FEATURES
depends on CC_IS_GCC && GCC_VERSION >= 110200
depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC))
prompt "Generate expolines as extern functions."
@@ -36,9 +36,6 @@ CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD
mflags-$(CONFIG_MARCH_Z900) := -march=z900
mflags-$(CONFIG_MARCH_Z990) := -march=z990
mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
@@ -51,9 +48,6 @@ export CC_FLAGS_MARCH := $(mflags-y)
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900
cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990
cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
@@ -256,8 +256,6 @@ static inline bool test_bit_inv(unsigned long nr,
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
/**
* __flogr - find leftmost one
* @word - The word to search
@@ -376,16 +374,6 @@ static inline int fls(unsigned int word)
return fls64(word);
}
#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
@@ -163,11 +163,9 @@ struct lowcore {
__u64 gmap; /* 0x03d0 */
__u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
__u32 return_lpswe; /* 0x0402 */
__u32 return_mcck_lpswe; /* 0x0406 */
__u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
__u32 return_lpswe; /* 0x0400 */
__u32 return_mcck_lpswe; /* 0x0404 */
__u8 pad_0x0408[0x0e00-0x0408]; /* 0x0408 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -10,8 +10,6 @@
#ifdef CC_USING_EXPOLINE
_LC_BR_R1 = __LC_BR_R1
/*
* The expoline macros are used to create thunks in the same format
* as gcc generates them. The 'comdat' section flag makes sure that
@@ -39,7 +37,6 @@ _LC_BR_R1 = __LC_BR_R1
.popsection
.endm
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
.macro __THUNK_PROLOG_BR r1,r2
__THUNK_PROLOG_NAME __s390_indirect_jump_r\r1
.endm
@@ -55,23 +52,6 @@ _LC_BR_R1 = __LC_BR_R1
.macro __THUNK_BRASL r1,r2,r3
brasl \r1,__s390_indirect_jump_r\r2
.endm
#else
.macro __THUNK_PROLOG_BR r1,r2
__THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_EPILOG_BR r1,r2
__THUNK_EPILOG_NAME __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_BR r1,r2
jg __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_BRASL r1,r2,r3
brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2
.endm
#endif
.macro __DECODE_RR expand,reg,ruse
.set __decode_fail,1
@@ -112,22 +92,8 @@ _LC_BR_R1 = __LC_BR_R1
.endm
.macro __THUNK_EX_BR reg,ruse
# Be very careful when adding instructions to this macro!
# The ALTERNATIVE replacement code has a .+10 which targets
# the "br \reg" after the code has been patched.
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
exrl 0,555f
j .
#else
.ifc \reg,%r1
ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
j .
.else
larl \ruse,555f
ex 0,0(\ruse)
j .
.endif
#endif
555: br \reg
.endm
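Editorial aside (a sketch, not part of the patch): with the z10
execute-extensions facility guaranteed, the macros above always expand to the
single exrl-based form, which is what the commit message means by a
drastically simplified spectre mitigation. For an indirect branch via %r1 the
generated thunk has this shape:

__s390_indirect_jump_r1:
	exrl	0,555f		# execute the br below as the branch target
	j	.		# stop speculative execution past the branch
555:	br	%r1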
@@ -187,14 +187,10 @@ static inline unsigned long get_tod_clock(void)
static inline unsigned long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
unsigned long clk;
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
return clk;
#else
return get_tod_clock();
#endif
}
static inline cycles_t get_cycles(void)
@@ -92,8 +92,6 @@ union oac {
};
};
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
#define __put_get_user_asm(to, from, size, oac_spec) \
({ \
int __rc; \
@@ -187,22 +185,6 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
return rc;
}
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
size = raw_copy_to_user(ptr, x, size);
return size ? -EFAULT : 0;
}
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
size = raw_copy_from_user(x, ptr, size);
return size ? -EFAULT : 0;
}
#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@@ -122,7 +122,6 @@ int main(void)
OFFSET(__LC_LPP, lowcore, lpp);
OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
OFFSET(__LC_LAST_BREAK, lowcore, last_break);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
@@ -70,8 +70,6 @@ void show_cacheinfo(struct seq_file *m)
struct cacheinfo *cache;
int idx;
if (!test_facility(34))
return;
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
cache = this_cpu_ci->info_list + idx;
@@ -131,8 +129,6 @@ int init_cache_level(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
if (!test_facility(34))
return -EOPNOTSUPP;
if (!this_cpu_ci)
return -EINVAL;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
@@ -156,8 +152,6 @@ int populate_cache_leaves(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
if (!test_facility(34))
return -EOPNOTSUPP;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
@@ -58,15 +58,6 @@ asm(
);
#ifdef CONFIG_EXPOLINE
asm(
" .align 16\n"
"ftrace_shared_hotpatch_trampoline_ex:\n"
" lmg %r0,%r1,2(%r1)\n"
" ex %r0," __stringify(__LC_BR_R1) "(%r0)\n"
" j .\n"
"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);
asm(
" .align 16\n"
"ftrace_shared_hotpatch_trampoline_exrl:\n"
@@ -90,12 +81,8 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
if (!nospec_disable) {
tstart = ftrace_shared_hotpatch_trampoline_ex;
tend = ftrace_shared_hotpatch_trampoline_ex_end;
if (test_facility(35)) { /* exrl */
tstart = ftrace_shared_hotpatch_trampoline_exrl;
tend = ftrace_shared_hotpatch_trampoline_exrl_end;
}
tstart = ftrace_shared_hotpatch_trampoline_exrl;
tend = ftrace_shared_hotpatch_trampoline_exrl_end;
}
#endif /* CONFIG_EXPOLINE */
if (end)
@@ -16,8 +16,6 @@ extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
extern const char ftrace_shared_hotpatch_trampoline_ex[];
extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
extern const char ftrace_plt_template[];
@@ -35,14 +35,8 @@ ENDPROC(ftrace_stub)
.if \allregs == 1
# save psw mask
# don't put any instructions clobbering CC before this point
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
epsw %r1,%r14
risbg %r14,%r1,0,31,32
#else
epsw %r14,%r1
sllg %r14,%r14,32
lr %r14,%r1
#endif
.endif
lgr %r1,%r15
@@ -58,12 +52,7 @@ ENDPROC(ftrace_stub)
.if \allregs == 1
stg %r14,(STACK_PTREGS_PSW)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
#else
lghi %r14,_PIF_FTRACE_FULL_REGS
stg %r14,STACK_PTREGS_FLAGS(%r15)
#endif
.else
xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
.endif
@@ -517,15 +517,9 @@ int module_finalize(const Elf_Ehdr *hdr,
ij = me->core_layout.base + me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
if (test_facility(35)) {
ij[0] = 0xc6000000; /* exrl %r0,.+10 */
ij[1] = 0x0005a7f4; /* j . */
ij[2] = 0x000007f1; /* br %r1 */
} else {
ij[0] = 0x44000000 | (unsigned int)
offsetof(struct lowcore, br_r1_trampoline);
ij[1] = 0xa7f40000; /* j . */
}
ij[0] = 0xc6000000; /* exrl %r0,.+10 */
ij[1] = 0x0005a7f4; /* j . */
ij[2] = 0x000007f1; /* br %r1 */
}
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
@@ -118,12 +118,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
if (thunk[0] == 0xc6 && thunk[1] == 0x00)
/* exrl %r0,<target-br> */
br = thunk + (*(int *)(thunk + 2)) * 2;
else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
thunk[6] == 0x44 && thunk[7] == 0x00 &&
(thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
(thunk[1] & 0xf0) == (thunk[8] & 0xf0))
/* larl %rx,<target br> + ex %r0,0(%rx) */
br = thunk + (*(int *)(thunk + 2)) * 2;
else
continue;
if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
@@ -1451,6 +1451,8 @@ static size_t cfdiag_maxsize(struct cpumf_ctr_info *info)
/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cfdiag_get_cpu_speed(void)
{
unsigned long mhz;
if (cpum_sf_avail()) { /* Sampling facility first */
struct hws_qsi_info_block si;
@@ -1464,12 +1466,9 @@ static void cfdiag_get_cpu_speed(void)
/* Fallback: CPU speed extract static part. Used in case
* CPU Measurement Sampling Facility is turned off.
*/
if (test_facility(34)) {
unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
if (mhz != -1UL)
cfdiag_cpu_speed = mhz & 0xffffffff;
}
mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
if (mhz != -1UL)
cfdiag_cpu_speed = mhz & 0xffffffff;
}
static int cfset_init(void)
@@ -172,8 +172,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
static int __init setup_hwcaps(void)
{
/* instructions named N3, "backported" to esa-mode */
if (test_facility(0))
elf_hwcap |= HWCAP_ESAN3;
elf_hwcap |= HWCAP_ESAN3;
/* z/Architecture mode active */
elf_hwcap |= HWCAP_ZARCH;
@@ -191,8 +190,7 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_LDISP;
/* extended-immediate */
if (test_facility(21))
elf_hwcap |= HWCAP_EIMM;
elf_hwcap |= HWCAP_EIMM;
/* extended-translation facility 3 enhancement */
if (test_facility(22) && test_facility(30))
@@ -262,21 +260,7 @@ static int __init setup_elf_platform(void)
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x2064:
case 0x2066:
default: /* Use "z900" as default for 64 bit kernels. */
strcpy(elf_platform, "z900");
break;
case 0x2084:
case 0x2086:
strcpy(elf_platform, "z990");
break;
case 0x2094:
case 0x2096:
strcpy(elf_platform, "z9-109");
break;
case 0x2097:
case 0x2098:
default: /* Use "z10" as default. */
strcpy(elf_platform, "z10");
break;
case 0x2817:
@@ -490,7 +490,6 @@ static void __init setup_lowcore_dat_off(void)
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
@@ -207,7 +207,6 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
lc->preempt_count = PREEMPT_DISABLED;
@@ -177,9 +177,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
@@ -194,9 +192,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)__ptr & mask) \
if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
@@ -213,9 +209,7 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__typeof__(*(ptr)) input; \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
else if ((u64 __force)ptr & mask) \
if ((u64 __force)ptr & mask) \
__rc = EMU_SPECIFICATION; \
else if (get_user(input, ptr)) \
__rc = EMU_ADDRESSING; \
@@ -327,10 +321,6 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
break;
case 0xc6:
switch (insn->opc1) {
case 0x02: /* pfdrl */
if (!test_facility(34))
rc = EMU_ILLEGAL_OP;
break;
case 0x04: /* cghrl */
rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
break;
@@ -8,14 +8,10 @@
* Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/asm-extable.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
@@ -35,32 +31,8 @@ void debug_user_asce(int exit)
}
#endif /*CONFIG_DEBUG_ENTRY */
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);
static int __init uaccess_init(void)
{
if (test_facility(27))
static_branch_enable(&have_mvcos);
return 0;
}
early_initcall(uaccess_init);
static inline int copy_with_mvcos(void)
{
if (static_branch_likely(&have_mvcos))
return 1;
return 0;
}
#else
static inline int copy_with_mvcos(void)
{
return 1;
}
#endif
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size, unsigned long key)
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
union oac spec = {
@@ -90,55 +62,12 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "+a" (size), "+a" (from), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%[key]\n"
"7: jz 5f\n"
"1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%[key]\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcp 0(%4,%2),0(%1),%[key]\n"
"9: slgr %0,%4\n"
" j 6f\n"
"5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: [key] "d" (key << 4)
: "cc", "memory");
return size;
}
static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
unsigned long n, unsigned long key)
{
if (copy_with_mvcos())
return copy_from_user_mvcos(to, from, n, key);
return copy_from_user_mvcp(to, from, n, key);
}
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return raw_copy_from_user_key(to, from, n, 0);
@@ -161,8 +90,8 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
}
EXPORT_SYMBOL(_copy_from_user_key);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size, unsigned long key)
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
union oac spec = {
@@ -192,55 +121,12 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
: [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long size, unsigned long key)
{
unsigned long tmp1, tmp2;
tmp1 = -256UL;
asm volatile(
" sacf 0\n"
"0: mvcs 0(%0,%1),0(%2),%[key]\n"
"7: jz 5f\n"
"1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcs 0(%0,%1),0(%2),%[key]\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcs 0(%4,%1),0(%2),%[key]\n"
"9: slgr %0,%4\n"
" j 6f\n"
"5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: [key] "d" (key << 4)
: "cc", "memory");
return size;
}
static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
unsigned long n, unsigned long key)
{
if (copy_with_mvcos())
return copy_to_user_mvcos(to, from, n, key);
return copy_to_user_mvcs(to, from, n, key);
}
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return raw_copy_to_user_key(to, from, n, 0);
@@ -258,7 +144,7 @@ unsigned long _copy_to_user_key(void __user *to, const void *from,
}
EXPORT_SYMBOL(_copy_to_user_key);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
unsigned long __clear_user(void __user *to, unsigned long size)
{
unsigned long tmp1, tmp2;
union oac spec = {
@@ -290,46 +176,4 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
: "cc", "memory", "0");
return size;
}
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
unsigned long tmp1, tmp2;
asm volatile(
" sacf 256\n"
" aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
"0: aghi %0,257\n"
" la %2,255(%1)\n" /* %2 = ptr + 255 */
" srl %2,12\n"
" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
" slgr %2,%1\n"
" clgr %0,%2\n" /* clear crosses next page boundary? */
" jnh 5f\n"
" aghi %2,-1\n"
"1: ex %2,0(%3)\n"
" aghi %2,1\n"
" slgr %0,%2\n"
" j 5f\n"
"2: xc 0(256,%1),0(%1)\n"
" la %1,256(%1)\n"
"3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
"5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
return size;
}
unsigned long __clear_user(void __user *to, unsigned long size)
{
if (copy_with_mvcos())
return clear_user_mvcos(to, size);
return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);
@@ -584,13 +584,9 @@ void __init vmem_map_init(void)
__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
if (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) {
/*
* Lowcore must be executable for LPSWE
* and expoline trampoline branch instructions.
*/
/* lowcore must be executable for LPSWE */
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
}
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
@@ -570,15 +570,8 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
if (nospec_uses_trampoline()) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
if (test_facility(35)) {
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
} else {
/* larl %r1,.+14 */
EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,0(%r1) */
EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
}
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
@@ -589,20 +582,12 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
(is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
if (test_facility(35)) {
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
/* br %r1 */
_EMIT2(0x07f1);
} else {
/* ex 0,S390_lowcore.br_r1_trampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
/* br %r1 */
_EMIT2(0x07f1);
}
}
@@ -27,24 +27,16 @@ static struct facility_def facility_defs[] = {
*/
.name = "FACILITIES_ALS",
.bits = (int[]){
#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
0, /* N3 instructions */
1, /* z/Arch mode installed */
#endif
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
18, /* long displacement facility */
#endif
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
21, /* extended-immediate facility */
25, /* store clock fast */
#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
27, /* mvcos */
32, /* compare and swap and store */
33, /* compare and swap and store 2 */
34, /* general instructions extension */
35, /* execute extensions */
#endif
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
45, /* fast-BCR, etc. */
#endif