Commit 65ad409e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 's390-6.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

 - Fix KMSAN build breakage caused by the conflict between s390 and
   mm-stable trees

 - Add KMSAN page markers for ptdump

 - Add runtime constant support

 - Fix __pa/__va for modules under non-GPL licenses by exporting
   necessary vm_layout struct with EXPORT_SYMBOL to prevent linkage
   problems

 - Fix an endless loop in the CF_DIAG event stop in the CPU Measurement
   Counter Facility code when the counter set size is zero

 - Remove the PROTECTED_VIRTUALIZATION_GUEST config option and enable
   its functionality by default

 - Support allocation of multiple MSI interrupts per device and improve
   logging of architecture-specific limitations

 - Add support for lowcore relocation as a debugging feature to catch
   all null ptr dereferences in the kernel address space, improving
   detection beyond the current implementation's limited write access
   protection

 - Clean up and rework CPU alternatives to allow for callbacks and early
   patching for the lowcore relocation

* tag 's390-6.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (39 commits)
  s390: Remove protvirt and kvm config guards for uv code
  s390/boot: Add cmdline option to relocate lowcore
  s390/kdump: Make kdump ready for lowcore relocation
  s390/entry: Make system_call() ready for lowcore relocation
  s390/entry: Make ret_from_fork() ready for lowcore relocation
  s390/entry: Make __switch_to() ready for lowcore relocation
  s390/entry: Make restart_int_handler() ready for lowcore relocation
  s390/entry: Make mchk_int_handler() ready for lowcore relocation
  s390/entry: Make int handlers ready for lowcore relocation
  s390/entry: Make pgm_check_handler() ready for lowcore relocation
  s390/entry: Add base register to CHECK_VMAP_STACK/CHECK_STACK macro
  s390/entry: Add base register to SIEEXIT macro
  s390/entry: Add base register to MBEAR macro
  s390/entry: Make __sie64a() ready for lowcore relocation
  s390/head64: Make startup code ready for lowcore relocation
  s390: Add infrastructure to patch lowcore accesses
  s390/atomic_ops: Disable flag outputs constraint for GCC versions below 14.2.0
  s390/entry: Move SIE indicator flag to thread info
  s390/nmi: Simplify ptregs setup
  s390/alternatives: Remove alternative facility list
  ...
parents a6294b5b 6dc2e98d
...@@ -3830,9 +3830,6 @@ ...@@ -3830,9 +3830,6 @@
noalign [KNL,ARM] noalign [KNL,ARM]
noaltinstr [S390,EARLY] Disables alternative instructions
patching (CPU alternatives feature).
noapic [SMP,APIC,EARLY] Tells the kernel to not make use of any noapic [SMP,APIC,EARLY] Tells the kernel to not make use of any
IOAPICs that may be present in the system. IOAPICs that may be present in the system.
......
...@@ -799,17 +799,6 @@ config HAVE_PNETID ...@@ -799,17 +799,6 @@ config HAVE_PNETID
menu "Virtualization" menu "Virtualization"
config PROTECTED_VIRTUALIZATION_GUEST
def_bool n
prompt "Protected virtualization guest support"
help
Select this option, if you want to be able to run this
kernel as a protected virtualization KVM guest.
Protected virtualization capable machines have a mini hypervisor
located at machine level (an ultravisor). With help of the
Ultravisor, KVM will be able to run "protected" VMs, special
VMs whose memory and management data are unavailable to KVM.
config PFAULT config PFAULT
def_bool y def_bool y
prompt "Pseudo page fault support" prompt "Pseudo page fault support"
......
...@@ -39,8 +39,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char ...@@ -39,8 +39,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o uv.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
......
// SPDX-License-Identifier: GPL-2.0
#include "../kernel/alternative.c"
...@@ -30,6 +30,8 @@ struct vmlinux_info { ...@@ -30,6 +30,8 @@ struct vmlinux_info {
unsigned long init_mm_off; unsigned long init_mm_off;
unsigned long swapper_pg_dir_off; unsigned long swapper_pg_dir_off;
unsigned long invalid_pg_dir_off; unsigned long invalid_pg_dir_off;
unsigned long alt_instructions;
unsigned long alt_instructions_end;
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
unsigned long kasan_early_shadow_page_off; unsigned long kasan_early_shadow_page_off;
unsigned long kasan_early_shadow_pte_off; unsigned long kasan_early_shadow_pte_off;
...@@ -89,8 +91,10 @@ extern char _end[], _decompressor_end[]; ...@@ -89,8 +91,10 @@ extern char _end[], _decompressor_end[];
extern unsigned char _compressed_start[]; extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[]; extern unsigned char _compressed_end[];
extern struct vmlinux_info _vmlinux_info; extern struct vmlinux_info _vmlinux_info;
#define vmlinux _vmlinux_info #define vmlinux _vmlinux_info
#define __lowcore_pa(x) ((unsigned long)(x) % sizeof(struct lowcore))
#define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore)) #define __abs_lowcore_pa(x) (((unsigned long)(x) - __abs_lowcore) % sizeof(struct lowcore))
#define __kernel_va(x) ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset)) #define __kernel_va(x) ((void *)((unsigned long)(x) - __kaslr_offset_phys + __kaslr_offset))
#define __kernel_pa(x) ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys) #define __kernel_pa(x) ((unsigned long)(x) - __kaslr_offset + __kaslr_offset_phys)
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/abs_lowcore.h>
#include <asm/page-states.h> #include <asm/page-states.h>
#include <asm/ebcdic.h> #include <asm/ebcdic.h>
#include <asm/sclp.h> #include <asm/sclp.h>
...@@ -310,5 +311,7 @@ void parse_boot_command_line(void) ...@@ -310,5 +311,7 @@ void parse_boot_command_line(void)
prot_virt_host = 1; prot_virt_host = 1;
} }
#endif #endif
if (!strcmp(param, "relocate_lowcore") && test_facility(193))
relocate_lowcore = 1;
} }
} }
...@@ -30,6 +30,7 @@ unsigned long __bootdata_preserved(vmemmap_size); ...@@ -30,6 +30,7 @@ unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR); unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END); unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable); unsigned long __bootdata_preserved(max_mappable);
int __bootdata_preserved(relocate_lowcore);
u64 __bootdata_preserved(stfle_fac_list[16]); u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data); struct oldmem_data __bootdata_preserved(oldmem_data);
...@@ -376,6 +377,8 @@ static void kaslr_adjust_vmlinux_info(long offset) ...@@ -376,6 +377,8 @@ static void kaslr_adjust_vmlinux_info(long offset)
vmlinux.init_mm_off += offset; vmlinux.init_mm_off += offset;
vmlinux.swapper_pg_dir_off += offset; vmlinux.swapper_pg_dir_off += offset;
vmlinux.invalid_pg_dir_off += offset; vmlinux.invalid_pg_dir_off += offset;
vmlinux.alt_instructions += offset;
vmlinux.alt_instructions_end += offset;
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
vmlinux.kasan_early_shadow_page_off += offset; vmlinux.kasan_early_shadow_page_off += offset;
vmlinux.kasan_early_shadow_pte_off += offset; vmlinux.kasan_early_shadow_pte_off += offset;
...@@ -478,8 +481,12 @@ void startup_kernel(void) ...@@ -478,8 +481,12 @@ void startup_kernel(void)
* before the kernel started. Therefore, in case the two sections * before the kernel started. Therefore, in case the two sections
* overlap there is no risk of corrupting any data. * overlap there is no risk of corrupting any data.
*/ */
if (kaslr_enabled()) if (kaslr_enabled()) {
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G); unsigned long amode31_min;
amode31_min = (unsigned long)_decompressor_end;
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
}
if (!amode31_lma) if (!amode31_lma)
amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size; amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size); physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
...@@ -503,6 +510,9 @@ void startup_kernel(void) ...@@ -503,6 +510,9 @@ void startup_kernel(void)
kaslr_adjust_got(__kaslr_offset); kaslr_adjust_got(__kaslr_offset);
setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit); setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
copy_bootdata(); copy_bootdata();
__apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions,
(struct alt_instr *)_vmlinux_info.alt_instructions_end,
ALT_CTX_EARLY);
/* /*
* Save KASLR offset for early dumps, before vmcore_info is set. * Save KASLR offset for early dumps, before vmcore_info is set.
......
...@@ -8,12 +8,8 @@ ...@@ -8,12 +8,8 @@
#include "uv.h" #include "uv.h"
/* will be used in arch/s390/kernel/uv.c */ /* will be used in arch/s390/kernel/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest); int __bootdata_preserved(prot_virt_guest);
#endif
#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host); int __bootdata_preserved(prot_virt_host);
#endif
struct uv_info __bootdata_preserved(uv_info); struct uv_info __bootdata_preserved(uv_info);
void uv_query_info(void) void uv_query_info(void)
...@@ -53,14 +49,11 @@ void uv_query_info(void) ...@@ -53,14 +49,11 @@ void uv_query_info(void)
uv_info.max_secrets = uvcb.max_secrets; uv_info.max_secrets = uvcb.max_secrets;
} }
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) && if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list)) test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
prot_virt_guest = 1; prot_virt_guest = 1;
#endif
} }
#if IS_ENABLED(CONFIG_KVM)
unsigned long adjust_to_uv_max(unsigned long limit) unsigned long adjust_to_uv_max(unsigned long limit)
{ {
if (is_prot_virt_host() && uv_info.max_sec_stor_addr) if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
...@@ -92,4 +85,3 @@ void sanitize_prot_virt_host(void) ...@@ -92,4 +85,3 @@ void sanitize_prot_virt_host(void)
{ {
prot_virt_host = is_prot_virt_host_capable(); prot_virt_host = is_prot_virt_host_capable();
} }
#endif
...@@ -2,21 +2,8 @@ ...@@ -2,21 +2,8 @@
#ifndef BOOT_UV_H #ifndef BOOT_UV_H
#define BOOT_UV_H #define BOOT_UV_H
#if IS_ENABLED(CONFIG_KVM)
unsigned long adjust_to_uv_max(unsigned long limit); unsigned long adjust_to_uv_max(unsigned long limit);
void sanitize_prot_virt_host(void); void sanitize_prot_virt_host(void);
#else
static inline unsigned long adjust_to_uv_max(unsigned long limit)
{
return limit;
}
static inline void sanitize_prot_virt_host(void) {}
#endif
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void); void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif
#endif /* BOOT_UV_H */ #endif /* BOOT_UV_H */
...@@ -26,6 +26,7 @@ atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]); ...@@ -26,6 +26,7 @@ atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
enum populate_mode { enum populate_mode {
POPULATE_NONE, POPULATE_NONE,
POPULATE_DIRECT, POPULATE_DIRECT,
POPULATE_LOWCORE,
POPULATE_ABS_LOWCORE, POPULATE_ABS_LOWCORE,
POPULATE_IDENTITY, POPULATE_IDENTITY,
POPULATE_KERNEL, POPULATE_KERNEL,
...@@ -242,6 +243,8 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m ...@@ -242,6 +243,8 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
return -1; return -1;
case POPULATE_DIRECT: case POPULATE_DIRECT:
return addr; return addr;
case POPULATE_LOWCORE:
return __lowcore_pa(addr);
case POPULATE_ABS_LOWCORE: case POPULATE_ABS_LOWCORE:
return __abs_lowcore_pa(addr); return __abs_lowcore_pa(addr);
case POPULATE_KERNEL: case POPULATE_KERNEL:
...@@ -418,6 +421,7 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat ...@@ -418,6 +421,7 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit) void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{ {
unsigned long lowcore_address = 0;
unsigned long start, end; unsigned long start, end;
unsigned long asce_type; unsigned long asce_type;
unsigned long asce_bits; unsigned long asce_bits;
...@@ -455,12 +459,17 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l ...@@ -455,12 +459,17 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
__arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER); __arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
__arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER); __arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
if (relocate_lowcore)
lowcore_address = LOWCORE_ALT_ADDRESS;
/* /*
* To allow prefixing the lowcore must be mapped with 4KB pages. * To allow prefixing the lowcore must be mapped with 4KB pages.
* To prevent creation of a large page at address 0 first map * To prevent creation of a large page at address 0 first map
* the lowcore and create the identity mapping only afterwards. * the lowcore and create the identity mapping only afterwards.
*/ */
pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT); pgtable_populate(lowcore_address,
lowcore_address + sizeof(struct lowcore),
POPULATE_LOWCORE);
for_each_physmem_usable_range(i, &start, &end) { for_each_physmem_usable_range(i, &start, &end) {
pgtable_populate((unsigned long)__identity_va(start), pgtable_populate((unsigned long)__identity_va(start),
(unsigned long)__identity_va(end), (unsigned long)__identity_va(end),
......
...@@ -55,7 +55,6 @@ CONFIG_EXPOLINE_AUTO=y ...@@ -55,7 +55,6 @@ CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m CONFIG_VFIO_AP=m
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y CONFIG_S390_HYPFS_FS=y
......
...@@ -53,7 +53,6 @@ CONFIG_EXPOLINE_AUTO=y ...@@ -53,7 +53,6 @@ CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m CONFIG_VFIO_AP=m
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y CONFIG_APPLDATA_BASE=y
CONFIG_S390_HYPFS_FS=y CONFIG_S390_HYPFS_FS=y
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#ifndef _ASM_S390_ABS_LOWCORE_H #ifndef _ASM_S390_ABS_LOWCORE_H
#define _ASM_S390_ABS_LOWCORE_H #define _ASM_S390_ABS_LOWCORE_H
#include <asm/sections.h>
#include <asm/lowcore.h> #include <asm/lowcore.h>
#define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore)) #define ABS_LOWCORE_MAP_SIZE (NR_CPUS * sizeof(struct lowcore))
...@@ -24,4 +25,11 @@ static inline void put_abs_lowcore(struct lowcore *lc) ...@@ -24,4 +25,11 @@ static inline void put_abs_lowcore(struct lowcore *lc)
put_cpu(); put_cpu();
} }
extern int __bootdata_preserved(relocate_lowcore);
static inline int have_relocated_lowcore(void)
{
return relocate_lowcore;
}
#endif /* _ASM_S390_ABS_LOWCORE_H */ #endif /* _ASM_S390_ABS_LOWCORE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_ALTERNATIVE_ASM_H
#define _ASM_S390_ALTERNATIVE_ASM_H
#ifdef __ASSEMBLY__
/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
* instruction. See apply_alternatives().
*/
.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
.long \orig_start - .
.long \alt_start - .
.word \feature
.byte \orig_end - \orig_start
.org . - ( \orig_end - \orig_start ) & 1
.org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
.org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr.
*/
.macro ALTERNATIVE oldinstr, newinstr, feature
.pushsection .altinstr_replacement,"ax"
770: \newinstr
771: .popsection
772: \oldinstr
773: .pushsection .altinstructions,"a"
alt_entry 772b, 773b, 770b, 771b, \feature
.popsection
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
.pushsection .altinstr_replacement,"ax"
770: \newinstr1
771: \newinstr2
772: .popsection
773: \oldinstr
774: .pushsection .altinstructions,"a"
alt_entry 773b, 774b, 770b, 771b,\feature1
alt_entry 773b, 774b, 771b, 772b,\feature2
.popsection
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
...@@ -2,6 +2,58 @@ ...@@ -2,6 +2,58 @@
#ifndef _ASM_S390_ALTERNATIVE_H #ifndef _ASM_S390_ALTERNATIVE_H
#define _ASM_S390_ALTERNATIVE_H #define _ASM_S390_ALTERNATIVE_H
/*
* Each alternative comes with a 32 bit feature field:
* union {
* u32 feature;
* struct {
* u32 ctx : 4;
* u32 type : 8;
* u32 data : 20;
* };
* }
*
* @ctx is a bitfield, where only one bit must be set. Each bit defines
* in which context an alternative is supposed to be applied to the
* kernel image:
*
* - from the decompressor before the kernel itself is executed
* - from early kernel code from within the kernel
*
* @type is a number which defines the type and with that the type
* specific alternative patching.
*
* @data is additional type specific information which defines if an
* alternative should be applied.
*/
#define ALT_CTX_EARLY 1
#define ALT_CTX_LATE 2
#define ALT_CTX_ALL (ALT_CTX_EARLY | ALT_CTX_LATE)
#define ALT_TYPE_FACILITY 0
#define ALT_TYPE_SPEC 1
#define ALT_TYPE_LOWCORE 2
#define ALT_DATA_SHIFT 0
#define ALT_TYPE_SHIFT 20
#define ALT_CTX_SHIFT 28
#define ALT_FACILITY_EARLY(facility) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
#define ALT_FACILITY(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
#define ALT_SPEC(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \
ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \
(facility) << ALT_DATA_SHIFT)
#define ALT_LOWCORE (ALT_CTX_EARLY << ALT_CTX_SHIFT | \
ALT_TYPE_LOWCORE << ALT_TYPE_SHIFT)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/types.h> #include <linux/types.h>
...@@ -11,12 +63,30 @@ ...@@ -11,12 +63,30 @@
struct alt_instr { struct alt_instr {
s32 instr_offset; /* original instruction */ s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */ s32 repl_offset; /* offset to replacement instruction */
u16 facility; /* facility bit set for replacement */ union {
u32 feature; /* feature required for replacement */
struct {
u32 ctx : 4; /* context */
u32 type : 8; /* type of alternative */
u32 data : 20; /* patching information */
};
};
u8 instrlen; /* length of original instruction */ u8 instrlen; /* length of original instruction */
} __packed; } __packed;
void apply_alternative_instructions(void); extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx);
static inline void apply_alternative_instructions(void)
{
__apply_alternatives(__alt_instructions, __alt_instructions_end, ALT_CTX_LATE);
}
static inline void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
__apply_alternatives(start, end, ALT_CTX_ALL);
}
/* /*
* +---------------------------------+ * +---------------------------------+
...@@ -48,10 +118,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ...@@ -48,10 +118,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
#define OLDINSTR(oldinstr) \ #define OLDINSTR(oldinstr) \
"661:\n\t" oldinstr "\n662:\n" "661:\n\t" oldinstr "\n662:\n"
#define ALTINSTR_ENTRY(facility, num) \ #define ALTINSTR_ENTRY(feature, num) \
"\t.long 661b - .\n" /* old instruction */ \ "\t.long 661b - .\n" /* old instruction */ \
"\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
"\t.word " __stringify(facility) "\n" /* facility bit */ \ "\t.long " __stringify(feature) "\n" /* feature */ \
"\t.byte " oldinstr_len "\n" /* instruction len */ \ "\t.byte " oldinstr_len "\n" /* instruction len */ \
"\t.org . - (" oldinstr_len ") & 1\n" \ "\t.org . - (" oldinstr_len ") & 1\n" \
"\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \ "\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \
...@@ -61,24 +131,24 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ...@@ -61,24 +131,24 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"
/* alternative assembly primitive: */ /* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, altinstr, facility) \ #define ALTERNATIVE(oldinstr, altinstr, feature) \
".pushsection .altinstr_replacement, \"ax\"\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr, 1) \ ALTINSTR_REPLACEMENT(altinstr, 1) \
".popsection\n" \ ".popsection\n" \
OLDINSTR(oldinstr) \ OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \ ".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility, 1) \ ALTINSTR_ENTRY(feature, 1) \
".popsection\n" ".popsection\n"
#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\ #define ALTERNATIVE_2(oldinstr, altinstr1, feature1, altinstr2, feature2)\
".pushsection .altinstr_replacement, \"ax\"\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(altinstr1, 1) \ ALTINSTR_REPLACEMENT(altinstr1, 1) \
ALTINSTR_REPLACEMENT(altinstr2, 2) \ ALTINSTR_REPLACEMENT(altinstr2, 2) \
".popsection\n" \ ".popsection\n" \
OLDINSTR(oldinstr) \ OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \ ".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(facility1, 1) \ ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(facility2, 2) \ ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" ".popsection\n"
/* /*
...@@ -93,12 +163,12 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ...@@ -93,12 +163,12 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
* For non barrier like inlines please define new variants * For non barrier like inlines please define new variants
* without volatile and memory clobber. * without volatile and memory clobber.
*/ */
#define alternative(oldinstr, altinstr, facility) \ #define alternative(oldinstr, altinstr, feature) \
asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory") asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) : : : "memory")
#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \ #define alternative_2(oldinstr, altinstr1, feature1, altinstr2, feature2) \
asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \ asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, feature1, \
altinstr2, facility2) ::: "memory") altinstr2, feature2) ::: "memory")
/* Alternative inline assembly with input. */ /* Alternative inline assembly with input. */
#define alternative_input(oldinstr, newinstr, feature, input...) \ #define alternative_input(oldinstr, newinstr, feature, input...) \
...@@ -106,8 +176,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ...@@ -106,8 +176,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
: : input) : : input)
/* Like alternative_input, but with a single output argument */ /* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, altinstr, facility, output, input...) \ #define alternative_io(oldinstr, altinstr, feature, output, input...) \
asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) \ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, feature) \
: output : input) : output : input)
/* Use this macro if more than one output parameter is needed. */ /* Use this macro if more than one output parameter is needed. */
...@@ -116,6 +186,56 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end); ...@@ -116,6 +186,56 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
/* Use this macro if clobbers are needed without inputs. */ /* Use this macro if clobbers are needed without inputs. */
#define ASM_NO_INPUT_CLOBBER(clobber...) : clobber #define ASM_NO_INPUT_CLOBBER(clobber...) : clobber
#else /* __ASSEMBLY__ */
/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
* instruction. See apply_alternatives().
*/
.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
.long \orig_start - .
.long \alt_start - .
.long \feature
.byte \orig_end - \orig_start
.org . - ( \orig_end - \orig_start ) & 1
.org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start )
.org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start )
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr.
*/
.macro ALTERNATIVE oldinstr, newinstr, feature
.pushsection .altinstr_replacement,"ax"
770: \newinstr
771: .popsection
772: \oldinstr
773: .pushsection .altinstructions,"a"
alt_entry 772b, 773b, 770b, 771b, \feature
.popsection
.endm
/*
* Define an alternative between two instructions. If @feature is
* present, early code in apply_alternatives() replaces @oldinstr with
* @newinstr.
*/
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
.pushsection .altinstr_replacement,"ax"
770: \newinstr1
771: \newinstr2
772: .popsection
773: \oldinstr
774: .pushsection .altinstructions,"a"
alt_entry 773b, 774b, 770b, 771b,\feature1
alt_entry 773b, 774b, 771b, 772b,\feature2
.popsection
.endm
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_ALTERNATIVE_H */ #endif /* _ASM_S390_ALTERNATIVE_H */
...@@ -188,7 +188,8 @@ static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new) ...@@ -188,7 +188,8 @@ static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
return old; return old;
} }
#ifdef __GCC_ASM_FLAG_OUTPUTS__ /* GCC versions before 14.2.0 may die with an ICE in some configurations. */
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && !(IS_ENABLED(CONFIG_CC_IS_GCC) && (GCC_VERSION < 140200))
static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new) static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{ {
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8) #define MAX_FACILITY_BIT (sizeof(stfle_fac_list) * 8)
extern u64 stfle_fac_list[16]; extern u64 stfle_fac_list[16];
extern u64 alt_stfle_fac_list[16];
static inline void __set_facility(unsigned long nr, void *facilities) static inline void __set_facility(unsigned long nr, void *facilities)
{ {
......
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
static inline bool is_lowcore_addr(void *addr) static inline bool is_lowcore_addr(void *addr)
{ {
return addr >= (void *)&S390_lowcore && return addr >= (void *)get_lowcore() &&
addr < (void *)(&S390_lowcore + 1); addr < (void *)(get_lowcore() + 1);
} }
static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin) static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
...@@ -25,7 +25,7 @@ static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin) ...@@ -25,7 +25,7 @@ static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
* order to get a distinct struct page. * order to get a distinct struct page.
*/ */
addr += (void *)lowcore_ptr[raw_smp_processor_id()] - addr += (void *)lowcore_ptr[raw_smp_processor_id()] -
(void *)&S390_lowcore; (void *)get_lowcore();
if (KMSAN_WARN_ON(is_lowcore_addr(addr))) if (KMSAN_WARN_ON(is_lowcore_addr(addr)))
return NULL; return NULL;
return kmsan_get_metadata(addr, is_origin); return kmsan_get_metadata(addr, is_origin);
......
...@@ -14,10 +14,15 @@ ...@@ -14,10 +14,15 @@
#include <asm/ctlreg.h> #include <asm/ctlreg.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/alternative.h>
#define LC_ORDER 1 #define LC_ORDER 1
#define LC_PAGES 2 #define LC_PAGES 2
#define LOWCORE_ALT_ADDRESS _AC(0x70000, UL)
#ifndef __ASSEMBLY__
struct pgm_tdb { struct pgm_tdb {
u64 data[32]; u64 data[32];
}; };
...@@ -97,8 +102,7 @@ struct lowcore { ...@@ -97,8 +102,7 @@ struct lowcore {
__u64 save_area_async[8]; /* 0x0240 */ __u64 save_area_async[8]; /* 0x0240 */
__u64 save_area_restart[1]; /* 0x0280 */ __u64 save_area_restart[1]; /* 0x0280 */
/* CPU flags. */ __u64 pcpu; /* 0x0288 */
__u64 cpu_flags; /* 0x0288 */
/* Return psws. */ /* Return psws. */
psw_t return_psw; /* 0x0290 */ psw_t return_psw; /* 0x0290 */
...@@ -215,7 +219,14 @@ struct lowcore { ...@@ -215,7 +219,14 @@ struct lowcore {
static __always_inline struct lowcore *get_lowcore(void) static __always_inline struct lowcore *get_lowcore(void)
{ {
return NULL; struct lowcore *lc;
if (__is_defined(__DECOMPRESSOR))
return NULL;
asm(ALTERNATIVE("llilh %[lc],0", "llilh %[lc],%[alt]", ALT_LOWCORE)
: [lc] "=d" (lc)
: [alt] "i" (LOWCORE_ALT_ADDRESS >> 16));
return lc;
} }
extern struct lowcore *lowcore_ptr[]; extern struct lowcore *lowcore_ptr[];
...@@ -225,4 +236,19 @@ static inline void set_prefix(__u32 address) ...@@ -225,4 +236,19 @@ static inline void set_prefix(__u32 address)
asm volatile("spx %0" : : "Q" (address) : "memory"); asm volatile("spx %0" : : "Q" (address) : "memory");
} }
#else /* __ASSEMBLY__ */
.macro GET_LC reg
ALTERNATIVE "llilh \reg,0", \
__stringify(llilh \reg, LOWCORE_ALT_ADDRESS >> 16), \
ALT_LOWCORE
.endm
.macro STMG_LC start, end, savearea
ALTERNATIVE "stmg \start, \end, \savearea", \
__stringify(stmg \start, \end, LOWCORE_ALT_ADDRESS + \savearea), \
ALT_LOWCORE
.endm
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_LOWCORE_H */ #endif /* _ASM_S390_LOWCORE_H */
...@@ -5,8 +5,17 @@ ...@@ -5,8 +5,17 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/types.h> #include <linux/types.h>
#include <asm/facility.h>
extern int nospec_disable; extern int nospec_disable;
extern int nobp;
/*
 * Report whether the nobp ("no branch prediction") spectre mitigation
 * is in effect: the nobp setting must be enabled and facility 82 must
 * be installed. Always false in the decompressor, which performs no
 * nospec setup (__is_defined() folds to a compile-time constant there).
 */
static inline bool nobp_enabled(void)
{
	bool decompressor = __is_defined(__DECOMPRESSOR);

	return !decompressor && nobp && test_facility(82);
}
void nospec_init_branches(void); void nospec_init_branches(void);
void nospec_auto_detect(void); void nospec_auto_detect(void);
......
...@@ -174,12 +174,10 @@ static inline int devmem_is_allowed(unsigned long pfn) ...@@ -174,12 +174,10 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define HAVE_ARCH_FREE_PAGE #define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE #define HAVE_ARCH_ALLOC_PAGE
#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_folio_accessible(struct folio *folio); int arch_make_folio_accessible(struct folio *folio);
#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE #define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
int arch_make_page_accessible(struct page *page); int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE #define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif
struct vm_layout { struct vm_layout {
unsigned long kaslr_offset; unsigned long kaslr_offset;
......
...@@ -14,13 +14,11 @@ ...@@ -14,13 +14,11 @@
#include <linux/bits.h> #include <linux/bits.h>
#define CIF_SIE 0 /* CPU needs SIE exit cleanup */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
#define CIF_ENABLED_WAIT 5 /* in enabled wait state */ #define CIF_ENABLED_WAIT 5 /* in enabled wait state */
#define CIF_MCCK_GUEST 6 /* machine check happening in guest */ #define CIF_MCCK_GUEST 6 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */ #define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
#define _CIF_SIE BIT(CIF_SIE)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY) #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT) #define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST) #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
...@@ -42,21 +40,37 @@ ...@@ -42,21 +40,37 @@
#include <asm/irqflags.h> #include <asm/irqflags.h>
#include <asm/alternative.h> #include <asm/alternative.h>
/*
 * Per-CPU management data, one entry per CPU in the pcpu_devices
 * per-cpu variable. The address of the current CPU's entry is cached
 * in lowcore (lowcore->pcpu) so it can be fetched via this_pcpu().
 * NOTE(review): field order/size is part of the asm-offsets contract
 * (__PCPU_FLAGS) - do not reorder.
 */
struct pcpu {
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	unsigned long flags;		/* per CPU flags (CIF_xxx bits) */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};
DECLARE_PER_CPU(struct pcpu, pcpu_devices);
typedef long (*sys_call_ptr_t)(struct pt_regs *regs); typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
/*
 * Return the struct pcpu entry of the current CPU. The entry's address
 * is stored in lowcore->pcpu (set up in setup_lowcore()), which makes
 * the lookup a single lowcore access.
 */
static __always_inline struct pcpu *this_pcpu(void)
{
	unsigned long pcpu_addr = get_lowcore()->pcpu;

	return (struct pcpu *)pcpu_addr;
}
static __always_inline void set_cpu_flag(int flag) static __always_inline void set_cpu_flag(int flag)
{ {
get_lowcore()->cpu_flags |= (1UL << flag); this_pcpu()->flags |= (1UL << flag);
} }
static __always_inline void clear_cpu_flag(int flag) static __always_inline void clear_cpu_flag(int flag)
{ {
get_lowcore()->cpu_flags &= ~(1UL << flag); this_pcpu()->flags &= ~(1UL << flag);
} }
static __always_inline bool test_cpu_flag(int flag) static __always_inline bool test_cpu_flag(int flag)
{ {
return get_lowcore()->cpu_flags & (1UL << flag); return this_pcpu()->flags & (1UL << flag);
} }
static __always_inline bool test_and_set_cpu_flag(int flag) static __always_inline bool test_and_set_cpu_flag(int flag)
...@@ -81,9 +95,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag) ...@@ -81,9 +95,7 @@ static __always_inline bool test_and_clear_cpu_flag(int flag)
*/ */
static __always_inline bool test_cpu_flag_of(int flag, int cpu) static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{ {
struct lowcore *lc = lowcore_ptr[cpu]; return per_cpu(pcpu_devices, cpu).flags & (1UL << flag);
return lc->cpu_flags & (1UL << flag);
} }
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY) #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
...@@ -405,7 +417,7 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) ...@@ -405,7 +417,7 @@ static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
static __always_inline void bpon(void) static __always_inline void bpon(void)
{ {
asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", 82)); asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)));
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_RUNTIME_CONST_H
#define _ASM_S390_RUNTIME_CONST_H
#include <linux/uaccess.h>
/*
 * runtime_const_ptr(sym) - load the value of runtime constant <sym>.
 *
 * Emits an iihf/iilf instruction pair with dummy 32-bit immediates
 * (0x01234567 / 0x89abcdef) and records the location of the pair in
 * section "runtime_ptr_<sym>" as a self-relative s32 offset.
 * runtime_const_init() later patches the real 64-bit value into the
 * two immediates (see __runtime_fixup_ptr()).
 */
#define runtime_const_ptr(sym) \
({ \
	typeof(sym) __ret; \
	\
	asm_inline( \
		"0: iihf %[__ret],%[c1]\n" \
		" iilf %[__ret],%[c2]\n" \
		".pushsection runtime_ptr_" #sym ",\"a\"\n" \
		".long 0b - .\n" \
		".popsection" \
		: [__ret] "=d" (__ret) \
		: [c1] "i" (0x01234567UL), \
		[c2] "i" (0x89abcdefUL)); \
	__ret; \
})
/*
 * runtime_const_shift_right_32(val, sym) - shift the 32-bit value
 * <val> right by a runtime-patched count.
 *
 * Emits an srl with a dummy shift count of 12 and records the
 * instruction's location in section "runtime_shift_<sym>";
 * runtime_const_init() patches the real count via
 * __runtime_fixup_shift().
 */
#define runtime_const_shift_right_32(val, sym) \
({ \
	unsigned int __ret = (val); \
	\
	asm_inline( \
		"0: srl %[__ret],12\n" \
		".pushsection runtime_shift_" #sym ",\"a\"\n" \
		".long 0b - .\n" \
		".popsection" \
		: [__ret] "+d" (__ret)); \
	__ret; \
})
/*
 * runtime_const_init(type, sym) - patch every recorded user of the
 * runtime constant <sym> of kind <type> ("ptr" or "shift") with its
 * final value. The __start_/__stop_ symbols are the linker-provided
 * bounds of the corresponding runtime_<type>_<sym> section of s32
 * self-relative offsets.
 */
#define runtime_const_init(type, sym) do { \
	extern s32 __start_runtime_##type##_##sym[]; \
	extern s32 __stop_runtime_##type##_##sym[]; \
	\
	runtime_const_fixup(__runtime_fixup_##type, \
		(unsigned long)(sym), \
		__start_runtime_##type##_##sym, \
		__stop_runtime_##type##_##sym); \
} while (0)
/* 32-bit immediate for iihf and iilf in bits in I2 field */
/*
 * Overwrite the 32-bit immediate at *p with val. The store is routed
 * through s390_kernel_write() instead of a plain assignment because
 * the target is instruction text.
 */
static inline void __runtime_fixup_32(u32 *p, unsigned int val)
{
	s390_kernel_write(p, &val, sizeof(val));
}
/*
 * Patch a 64-bit pointer value into an iihf/iilf pair located at
 * "where": the iihf immediate starts at byte offset 2 and receives the
 * upper half of val; the iilf immediate starts at byte offset 8
 * (6-byte iihf plus 2 opcode bytes of iilf) and receives the lower
 * half.
 */
static inline void __runtime_fixup_ptr(void *where, unsigned long val)
{
	__runtime_fixup_32(where + 2, val >> 32);
	__runtime_fixup_32(where + 8, val);
}
/* Immediate value is lower 12 bits of D2 field of srl */
static inline void __runtime_fixup_shift(void *where, unsigned long val)
{
	u32 insn = *(u32 *)where;

	/* Clear the 12-bit D2 field, insert the shift count (0..63). */
	insn &= 0xfffff000;
	insn |= (val & 63);
	/* Write back through the text-patching path. */
	s390_kernel_write(where, &insn, sizeof(insn));
}
/*
 * Apply the fixup function @fn with value @val to every patch site
 * recorded in the section delimited by [@start, @end). Each s32 entry
 * holds the location of the instruction to patch as an offset relative
 * to the entry itself.
 */
static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
				       unsigned long val, s32 *start, s32 *end)
{
	s32 *entry;

	for (entry = start; entry < end; entry++)
		fn((void *)entry + *entry, val);
}
#endif /* _ASM_S390_RUNTIME_CONST_H */
...@@ -24,7 +24,6 @@ extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); ...@@ -24,7 +24,6 @@ extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *); extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_emergency_stop(void); extern void smp_emergency_stop(void);
......
...@@ -79,7 +79,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) ...@@ -79,7 +79,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
typecheck(int, lp->lock); typecheck(int, lp->lock);
kcsan_release(); kcsan_release();
asm_inline volatile( asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */
" sth %1,%0\n" " sth %1,%0\n"
: "=R" (((unsigned short *) &lp->lock)[1]) : "=R" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory"); : "d" (0) : "cc", "memory");
......
...@@ -40,6 +40,7 @@ struct thread_info { ...@@ -40,6 +40,7 @@ struct thread_info {
unsigned long flags; /* low level flags */ unsigned long flags; /* low level flags */
unsigned long syscall_work; /* SYSCALL_WORK_ flags */ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
unsigned int cpu; /* current CPU */ unsigned int cpu; /* current CPU */
unsigned char sie; /* running in SIE context */
}; };
/* /*
......
...@@ -332,7 +332,14 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo ...@@ -332,7 +332,14 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return __clear_user(to, n); return __clear_user(to, n);
} }
void *s390_kernel_write(void *dst, const void *src, size_t size); void *__s390_kernel_write(void *dst, const void *src, size_t size);
/*
 * Copy @size bytes from @src to kernel destination @dst. The
 * decompressor build (__DECOMPRESSOR defined) uses a plain memcpy();
 * the kernel proper routes the access through __s390_kernel_write().
 */
static inline void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	if (!__is_defined(__DECOMPRESSOR))
		return __s390_kernel_write(dst, src, size);
	return memcpy(dst, src, size);
}
int __noreturn __put_kernel_bad(void); int __noreturn __put_kernel_bad(void);
......
...@@ -414,7 +414,6 @@ static inline bool uv_has_feature(u8 feature_bit) ...@@ -414,7 +414,6 @@ static inline bool uv_has_feature(u8 feature_bit)
return test_bit_inv(feature_bit, &uv_info.uv_feature_indications); return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
} }
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest; extern int prot_virt_guest;
static inline int is_prot_virt_guest(void) static inline int is_prot_virt_guest(void)
...@@ -466,13 +465,6 @@ static inline int uv_remove_shared(unsigned long addr) ...@@ -466,13 +465,6 @@ static inline int uv_remove_shared(unsigned long addr)
return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS); return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
} }
#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif
#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host; extern int prot_virt_host;
static inline int is_prot_virt_host(void) static inline int is_prot_virt_host(void)
...@@ -489,29 +481,5 @@ int uv_convert_from_secure_pte(pte_t pte); ...@@ -489,29 +481,5 @@ int uv_convert_from_secure_pte(pte_t pte);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr); int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
void setup_uv(void); void setup_uv(void);
#else
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
static inline int uv_pin_shared(unsigned long paddr)
{
return 0;
}
static inline int uv_destroy_folio(struct folio *folio)
{
return 0;
}
static inline int uv_destroy_pte(pte_t pte)
{
return 0;
}
static inline int uv_convert_from_secure_pte(pte_t pte)
{
return 0;
}
#endif
#endif /* _ASM_S390_UV_H */ #endif /* _ASM_S390_UV_H */
...@@ -43,7 +43,7 @@ obj-y += sysinfo.o lgr.o os_info.o ctlreg.o ...@@ -43,7 +43,7 @@ obj-y += sysinfo.o lgr.o os_info.o ctlreg.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o kdebugfs.o alternative.o obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o
extra-y += vmlinux.lds extra-y += vmlinux.lds
...@@ -80,7 +80,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o ...@@ -80,7 +80,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o perf_pai_ext.o obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o perf_pai_ext.o
obj-$(CONFIG_TRACEPOINTS) += trace.o obj-$(CONFIG_TRACEPOINTS) += trace.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
# vdso # vdso
obj-y += vdso64/ obj-y += vdso64/
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <asm/abs_lowcore.h> #include <asm/abs_lowcore.h>
unsigned long __bootdata_preserved(__abs_lowcore); unsigned long __bootdata_preserved(__abs_lowcore);
int __bootdata_preserved(relocate_lowcore);
int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc) int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc)
{ {
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/cpu.h> #include <linux/uaccess.h>
#include <linux/smp.h> #include <asm/nospec-branch.h>
#include <asm/text-patching.h> #include <asm/abs_lowcore.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/facility.h> #include <asm/facility.h>
#include <asm/nospec-branch.h>
static int __initdata_or_module alt_instr_disabled;
static int __init disable_alternative_instructions(char *str)
{
alt_instr_disabled = 1;
return 0;
}
early_param("noaltinstr", disable_alternative_instructions);
static void __init_or_module __apply_alternatives(struct alt_instr *start, void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx)
struct alt_instr *end)
{ {
struct alt_instr *a;
u8 *instr, *replacement; u8 *instr, *replacement;
struct alt_instr *a;
bool replace;
/* /*
* The scan order should be from start to end. A later scanned * The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code. * alternative code can overwrite previously scanned alternative code.
*/ */
for (a = start; a < end; a++) { for (a = start; a < end; a++) {
if (!(a->ctx & ctx))
continue;
switch (a->type) {
case ALT_TYPE_FACILITY:
replace = test_facility(a->data);
break;
case ALT_TYPE_SPEC:
replace = nobp_enabled();
break;
case ALT_TYPE_LOWCORE:
replace = have_relocated_lowcore();
break;
default:
replace = false;
}
if (!replace)
continue;
instr = (u8 *)&a->instr_offset + a->instr_offset; instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset;
if (!__test_facility(a->facility, alt_stfle_fac_list))
continue;
s390_kernel_write(instr, replacement, a->instrlen); s390_kernel_write(instr, replacement, a->instrlen);
} }
} }
void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
if (!alt_instr_disabled)
__apply_alternatives(start, end);
}
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
void __init apply_alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
}
static void do_sync_core(void *info)
{
sync_core();
}
void text_poke_sync(void)
{
on_each_cpu(do_sync_core, NULL, 1);
}
void text_poke_sync_lock(void)
{
cpus_read_lock();
text_poke_sync();
cpus_read_unlock();
}
...@@ -28,6 +28,7 @@ int main(void) ...@@ -28,6 +28,7 @@ int main(void)
BLANK(); BLANK();
/* thread info offsets */ /* thread info offsets */
OFFSET(__TI_flags, task_struct, thread_info.flags); OFFSET(__TI_flags, task_struct, thread_info.flags);
OFFSET(__TI_sie, task_struct, thread_info.sie);
BLANK(); BLANK();
/* pt_regs offsets */ /* pt_regs offsets */
OFFSET(__PT_PSW, pt_regs, psw); OFFSET(__PT_PSW, pt_regs, psw);
...@@ -114,7 +115,7 @@ int main(void) ...@@ -114,7 +115,7 @@ int main(void)
OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync); OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async); OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart); OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags); OFFSET(__LC_PCPU, lowcore, pcpu);
OFFSET(__LC_RETURN_PSW, lowcore, return_psw); OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw); OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer); OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer);
...@@ -186,5 +187,7 @@ int main(void) ...@@ -186,5 +187,7 @@ int main(void)
#endif #endif
OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs); OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs)); DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
OFFSET(__PCPU_FLAGS, pcpu, flags);
return 0; return 0;
} }
...@@ -48,6 +48,7 @@ decompressor_handled_param(dfltcc); ...@@ -48,6 +48,7 @@ decompressor_handled_param(dfltcc);
decompressor_handled_param(facilities); decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr); decompressor_handled_param(nokaslr);
decompressor_handled_param(cmma); decompressor_handled_param(cmma);
decompressor_handled_param(relocate_lowcore);
#if IS_ENABLED(CONFIG_KVM) #if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt); decompressor_handled_param(prot_virt);
#endif #endif
...@@ -190,13 +191,6 @@ static noinline __init void setup_lowcore_early(void) ...@@ -190,13 +191,6 @@ static noinline __init void setup_lowcore_early(void)
get_lowcore()->preempt_count = INIT_PREEMPT_COUNT; get_lowcore()->preempt_count = INIT_PREEMPT_COUNT;
} }
static noinline __init void setup_facility_list(void)
{
memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
__clear_facility(82, alt_stfle_fac_list);
}
static __init void detect_diag9c(void) static __init void detect_diag9c(void)
{ {
unsigned int cpu_address; unsigned int cpu_address;
...@@ -291,7 +285,6 @@ void __init startup_init(void) ...@@ -291,7 +285,6 @@ void __init startup_init(void)
lockdep_off(); lockdep_off();
sort_amode31_extable(); sort_amode31_extable();
setup_lowcore_early(); setup_lowcore_early();
setup_facility_list();
detect_machine_type(); detect_machine_type();
setup_arch_string(); setup_arch_string();
setup_boot_command_line(); setup_boot_command_line();
......
This diff is collapsed.
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/lowcore.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -18,14 +19,15 @@ ...@@ -18,14 +19,15 @@
__HEAD __HEAD
SYM_CODE_START(startup_continue) SYM_CODE_START(startup_continue)
larl %r1,tod_clock_base larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK GET_LC %r2
mvc 0(16,%r1),__LC_BOOT_CLOCK(%r2)
# #
# Setup stack # Setup stack
# #
larl %r14,init_task larl %r14,init_task
stg %r14,__LC_CURRENT stg %r14,__LC_CURRENT(%r2)
larl %r15,init_thread_union+STACK_INIT_OFFSET larl %r15,init_thread_union+STACK_INIT_OFFSET
stg %r15,__LC_KERNEL_STACK stg %r15,__LC_KERNEL_STACK(%r2)
brasl %r14,sclp_early_adjust_va # allow sclp_early_printk brasl %r14,sclp_early_adjust_va # allow sclp_early_printk
brasl %r14,startup_init # s390 specific early init brasl %r14,startup_init # s390 specific early init
brasl %r14,start_kernel # common init code brasl %r14,start_kernel # common init code
......
...@@ -2112,7 +2112,7 @@ void do_restart(void *arg) ...@@ -2112,7 +2112,7 @@ void do_restart(void *arg)
tracing_off(); tracing_off();
debug_locks_off(); debug_locks_off();
lgr_info_log(); lgr_info_log();
smp_call_online_cpu(__do_restart, arg); smp_call_ipl_cpu(__do_restart, arg);
} }
/* on halt */ /* on halt */
......
...@@ -62,7 +62,7 @@ static void __do_machine_kdump(void *data) ...@@ -62,7 +62,7 @@ static void __do_machine_kdump(void *data)
* This need to be done *after* s390_reset_system set the * This need to be done *after* s390_reset_system set the
* prefix register of this CPU to zero * prefix register of this CPU to zero
*/ */
memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA), memcpy(absolute_pointer(get_lowcore()->floating_pt_save_area),
phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512); phys_to_virt(prefix + __LC_FPREGS_SAVE_AREA), 512);
call_nodat(1, int, purgatory, int, 1); call_nodat(1, int, purgatory, int, 1);
......
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/nospec-branch.h> #include <asm/nospec-branch.h>
int nobp = IS_ENABLED(CONFIG_KERNEL_NOBP);
static int __init nobp_setup_early(char *str) static int __init nobp_setup_early(char *str)
{ {
bool enabled; bool enabled;
...@@ -17,11 +19,11 @@ static int __init nobp_setup_early(char *str) ...@@ -17,11 +19,11 @@ static int __init nobp_setup_early(char *str)
* The user explicitly requested nobp=1, enable it and * The user explicitly requested nobp=1, enable it and
* disable the expoline support. * disable the expoline support.
*/ */
__set_facility(82, alt_stfle_fac_list); nobp = 1;
if (IS_ENABLED(CONFIG_EXPOLINE)) if (IS_ENABLED(CONFIG_EXPOLINE))
nospec_disable = 1; nospec_disable = 1;
} else { } else {
__clear_facility(82, alt_stfle_fac_list); nobp = 0;
} }
return 0; return 0;
} }
...@@ -29,7 +31,7 @@ early_param("nobp", nobp_setup_early); ...@@ -29,7 +31,7 @@ early_param("nobp", nobp_setup_early);
static int __init nospec_setup_early(char *str) static int __init nospec_setup_early(char *str)
{ {
__clear_facility(82, alt_stfle_fac_list); nobp = 0;
return 0; return 0;
} }
early_param("nospec", nospec_setup_early); early_param("nospec", nospec_setup_early);
...@@ -40,7 +42,7 @@ static int __init nospec_report(void) ...@@ -40,7 +42,7 @@ static int __init nospec_report(void)
pr_info("Spectre V2 mitigation: etokens\n"); pr_info("Spectre V2 mitigation: etokens\n");
if (nospec_uses_trampoline()) if (nospec_uses_trampoline())
pr_info("Spectre V2 mitigation: execute trampolines\n"); pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, alt_stfle_fac_list)) if (nobp_enabled())
pr_info("Spectre V2 mitigation: limited branch prediction\n"); pr_info("Spectre V2 mitigation: limited branch prediction\n");
return 0; return 0;
} }
...@@ -66,14 +68,14 @@ void __init nospec_auto_detect(void) ...@@ -66,14 +68,14 @@ void __init nospec_auto_detect(void)
*/ */
if (__is_defined(CC_USING_EXPOLINE)) if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1; nospec_disable = 1;
__clear_facility(82, alt_stfle_fac_list); nobp = 0;
} else if (__is_defined(CC_USING_EXPOLINE)) { } else if (__is_defined(CC_USING_EXPOLINE)) {
/* /*
* The kernel has been compiled with expolines. * The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp. * Keep expolines enabled and disable nobp.
*/ */
nospec_disable = 0; nospec_disable = 0;
__clear_facility(82, alt_stfle_fac_list); nobp = 0;
} }
/* /*
* If the kernel has not been compiled with expolines the * If the kernel has not been compiled with expolines the
...@@ -86,7 +88,7 @@ static int __init spectre_v2_setup_early(char *str) ...@@ -86,7 +88,7 @@ static int __init spectre_v2_setup_early(char *str)
{ {
if (str && !strncmp(str, "on", 2)) { if (str && !strncmp(str, "on", 2)) {
nospec_disable = 0; nospec_disable = 0;
__clear_facility(82, alt_stfle_fac_list); nobp = 0;
} }
if (str && !strncmp(str, "off", 3)) if (str && !strncmp(str, "off", 3))
nospec_disable = 1; nospec_disable = 1;
......
...@@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, ...@@ -17,7 +17,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Mitigation: etokens\n"); return sprintf(buf, "Mitigation: etokens\n");
if (nospec_uses_trampoline()) if (nospec_uses_trampoline())
return sprintf(buf, "Mitigation: execute trampolines\n"); return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, alt_stfle_fac_list)) if (nobp_enabled())
return sprintf(buf, "Mitigation: limited branch prediction\n"); return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n"); return sprintf(buf, "Vulnerable\n");
} }
...@@ -556,25 +556,31 @@ static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth) ...@@ -556,25 +556,31 @@ static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth)
struct cf_trailer_entry *trailer_start, *trailer_stop; struct cf_trailer_entry *trailer_start, *trailer_stop;
struct cf_ctrset_entry *ctrstart, *ctrstop; struct cf_ctrset_entry *ctrstart, *ctrstop;
size_t offset = 0; size_t offset = 0;
int i;
auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1; for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
do {
ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset); ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset);
ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset); ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset);
/* Counter set not authorized */
if (!(auth & cpumf_ctr_ctl[i]))
continue;
/* Counter set size zero was not saved */
if (!cpum_cf_read_setsize(i))
continue;
if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) { if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
pr_err_once("cpum_cf_diag counter set compare error " pr_err_once("cpum_cf_diag counter set compare error "
"in set %i\n", ctrstart->set); "in set %i\n", ctrstart->set);
return 0; return 0;
} }
auth &= ~cpumf_ctr_ctl[ctrstart->set];
if (ctrstart->def == CF_DIAG_CTRSET_DEF) { if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
cfdiag_diffctrset((u64 *)(ctrstart + 1), cfdiag_diffctrset((u64 *)(ctrstart + 1),
(u64 *)(ctrstop + 1), ctrstart->ctr); (u64 *)(ctrstop + 1), ctrstart->ctr);
offset += ctrstart->ctr * sizeof(u64) + offset += ctrstart->ctr * sizeof(u64) +
sizeof(*ctrstart); sizeof(*ctrstart);
} }
} while (ctrstart->def && auth); }
/* Save time_stamp from start of event in stop's trailer */ /* Save time_stamp from start of event in stop's trailer */
trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset); trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset);
......
...@@ -17,7 +17,8 @@ ...@@ -17,7 +17,8 @@
#include <linux/mm_types.h> #include <linux/mm_types.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/diag.h> #include <asm/diag.h>
#include <asm/facility.h> #include <asm/facility.h>
#include <asm/elf.h> #include <asm/elf.h>
...@@ -79,6 +80,23 @@ void notrace stop_machine_yield(const struct cpumask *cpumask) ...@@ -79,6 +80,23 @@ void notrace stop_machine_yield(const struct cpumask *cpumask)
} }
} }
/* Cross-CPU callback: execute a serializing operation on this CPU. */
static void do_sync_core(void *info)
{
	sync_core();
}

/*
 * Force every online CPU through sync_core() so that previously
 * patched instruction text is guaranteed to be observed.
 */
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

/*
 * Same as text_poke_sync(), but with the cpus read lock held so the
 * set of online CPUs cannot change while the callbacks run.
 */
void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}
/* /*
* cpu_init - initializes state that is per-CPU. * cpu_init - initializes state that is per-CPU.
*/ */
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/nospec-insn.h> #include <asm/nospec-insn.h>
#include <asm/sigp.h> #include <asm/sigp.h>
#include <asm/lowcore.h>
GEN_BR_THUNK %r9 GEN_BR_THUNK %r9
...@@ -20,20 +21,15 @@ ...@@ -20,20 +21,15 @@
# r3 = Parameter for function # r3 = Parameter for function
# #
SYM_CODE_START(store_status) SYM_CODE_START(store_status)
/* Save register one and load save area base */ STMG_LC %r0,%r15,__LC_GPREGS_SAVE_AREA
stg %r1,__LC_SAVE_AREA_RESTART
/* General purpose registers */ /* General purpose registers */
lghi %r1,__LC_GPREGS_SAVE_AREA GET_LC %r13
stmg %r0,%r15,0(%r1)
mvc 8(8,%r1),__LC_SAVE_AREA_RESTART
/* Control registers */ /* Control registers */
lghi %r1,__LC_CREGS_SAVE_AREA stctg %c0,%c15,__LC_CREGS_SAVE_AREA(%r13)
stctg %c0,%c15,0(%r1)
/* Access registers */ /* Access registers */
lghi %r1,__LC_AREGS_SAVE_AREA stamy %a0,%a15,__LC_AREGS_SAVE_AREA(%r13)
stam %a0,%a15,0(%r1)
/* Floating point registers */ /* Floating point registers */
lghi %r1,__LC_FPREGS_SAVE_AREA lay %r1,__LC_FPREGS_SAVE_AREA(%r13)
std %f0, 0x00(%r1) std %f0, 0x00(%r1)
std %f1, 0x08(%r1) std %f1, 0x08(%r1)
std %f2, 0x10(%r1) std %f2, 0x10(%r1)
...@@ -51,21 +47,21 @@ SYM_CODE_START(store_status) ...@@ -51,21 +47,21 @@ SYM_CODE_START(store_status)
std %f14,0x70(%r1) std %f14,0x70(%r1)
std %f15,0x78(%r1) std %f15,0x78(%r1)
/* Floating point control register */ /* Floating point control register */
lghi %r1,__LC_FP_CREG_SAVE_AREA lay %r1,__LC_FP_CREG_SAVE_AREA(%r13)
stfpc 0(%r1) stfpc 0(%r1)
/* CPU timer */ /* CPU timer */
lghi %r1,__LC_CPU_TIMER_SAVE_AREA lay %r1,__LC_CPU_TIMER_SAVE_AREA(%r13)
stpt 0(%r1) stpt 0(%r1)
/* Store prefix register */ /* Store prefix register */
lghi %r1,__LC_PREFIX_SAVE_AREA lay %r1,__LC_PREFIX_SAVE_AREA(%r13)
stpx 0(%r1) stpx 0(%r1)
/* Clock comparator - seven bytes */ /* Clock comparator - seven bytes */
lghi %r1,__LC_CLOCK_COMP_SAVE_AREA
larl %r4,clkcmp larl %r4,clkcmp
stckc 0(%r4) stckc 0(%r4)
lay %r1,__LC_CLOCK_COMP_SAVE_AREA(%r13)
mvc 1(7,%r1),1(%r4) mvc 1(7,%r1),1(%r4)
/* Program status word */ /* Program status word */
lghi %r1,__LC_PSW_SAVE_AREA lay %r1,__LC_PSW_SAVE_AREA(%r13)
epsw %r4,%r5 epsw %r4,%r5
st %r4,0(%r1) st %r4,0(%r1)
st %r5,4(%r1) st %r5,4(%r1)
......
...@@ -149,13 +149,12 @@ unsigned long __bootdata_preserved(max_mappable); ...@@ -149,13 +149,12 @@ unsigned long __bootdata_preserved(max_mappable);
struct physmem_info __bootdata(physmem_info); struct physmem_info __bootdata(physmem_info);
struct vm_layout __bootdata_preserved(vm_layout); struct vm_layout __bootdata_preserved(vm_layout);
EXPORT_SYMBOL_GPL(vm_layout); EXPORT_SYMBOL(vm_layout);
int __bootdata_preserved(__kaslr_enabled); int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support); unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support); EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]); u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list); EXPORT_SYMBOL(stfle_fac_list);
u64 alt_stfle_fac_list[16];
struct oldmem_data __bootdata_preserved(oldmem_data); struct oldmem_data __bootdata_preserved(oldmem_data);
unsigned long VMALLOC_START; unsigned long VMALLOC_START;
...@@ -406,6 +405,7 @@ static void __init setup_lowcore(void) ...@@ -406,6 +405,7 @@ static void __init setup_lowcore(void)
panic("%s: Failed to allocate %zu bytes align=%zx\n", panic("%s: Failed to allocate %zu bytes align=%zx\n",
__func__, sizeof(*lc), sizeof(*lc)); __func__, sizeof(*lc), sizeof(*lc));
lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
lc->restart_psw.addr = __pa(restart_int_handler); lc->restart_psw.addr = __pa(restart_int_handler);
lc->external_new_psw.mask = PSW_KERNEL_BITS; lc->external_new_psw.mask = PSW_KERNEL_BITS;
...@@ -889,6 +889,9 @@ void __init setup_arch(char **cmdline_p) ...@@ -889,6 +889,9 @@ void __init setup_arch(char **cmdline_p)
else else
pr_info("Linux is running as a guest in 64-bit mode\n"); pr_info("Linux is running as a guest in 64-bit mode\n");
if (have_relocated_lowcore())
pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
log_component_list(); log_component_list();
/* Have one command line that is parsed and saved in /proc/cmdline */ /* Have one command line that is parsed and saved in /proc/cmdline */
......
This diff is collapsed.
...@@ -18,11 +18,22 @@ ...@@ -18,11 +18,22 @@
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/uv.h> #include <asm/uv.h>
#if !IS_ENABLED(CONFIG_KVM)
/*
 * Stub implementations used when KVM is disabled (see the
 * !IS_ENABLED(CONFIG_KVM) guard above): with no guest mappings both
 * operations trivially return 0.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	return 0;
}

int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	return 0;
}
#endif
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */ /* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest); int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest); EXPORT_SYMBOL(prot_virt_guest);
#endif
/* /*
* uv_info contains both host and guest information but it's currently only * uv_info contains both host and guest information but it's currently only
...@@ -35,7 +46,6 @@ EXPORT_SYMBOL(prot_virt_guest); ...@@ -35,7 +46,6 @@ EXPORT_SYMBOL(prot_virt_guest);
struct uv_info __bootdata_preserved(uv_info); struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info); EXPORT_SYMBOL(uv_info);
#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host); int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host); EXPORT_SYMBOL(prot_virt_host);
...@@ -543,9 +553,6 @@ int arch_make_page_accessible(struct page *page) ...@@ -543,9 +553,6 @@ int arch_make_page_accessible(struct page *page)
return arch_make_folio_accessible(page_folio(page)); return arch_make_folio_accessible(page_folio(page));
} }
EXPORT_SYMBOL_GPL(arch_make_page_accessible); EXPORT_SYMBOL_GPL(arch_make_page_accessible);
#endif
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj, static ssize_t uv_query_facilities(struct kobject *kobj,
struct kobj_attribute *attr, char *buf) struct kobj_attribute *attr, char *buf)
{ {
...@@ -721,24 +728,13 @@ static struct attribute_group uv_query_attr_group = { ...@@ -721,24 +728,13 @@ static struct attribute_group uv_query_attr_group = {
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj, static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
struct kobj_attribute *attr, char *buf) struct kobj_attribute *attr, char *buf)
{ {
int val = 0; return sysfs_emit(buf, "%d\n", prot_virt_guest);
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
val = prot_virt_guest;
#endif
return sysfs_emit(buf, "%d\n", val);
} }
static ssize_t uv_is_prot_virt_host(struct kobject *kobj, static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
struct kobj_attribute *attr, char *buf) struct kobj_attribute *attr, char *buf)
{ {
int val = 0; return sysfs_emit(buf, "%d\n", prot_virt_host);
#if IS_ENABLED(CONFIG_KVM)
val = prot_virt_host;
#endif
return sysfs_emit(buf, "%d\n", val);
} }
static struct kobj_attribute uv_prot_virt_guest = static struct kobj_attribute uv_prot_virt_guest =
...@@ -790,4 +786,3 @@ static int __init uv_info_init(void) ...@@ -790,4 +786,3 @@ static int __init uv_info_init(void)
return rc; return rc;
} }
device_initcall(uv_info_init); device_initcall(uv_info_init);
#endif
...@@ -190,6 +190,9 @@ SECTIONS ...@@ -190,6 +190,9 @@ SECTIONS
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100) INIT_DATA_SECTION(0x100)
RUNTIME_CONST(shift, d_hash_shift)
RUNTIME_CONST(ptr, dentry_hashtable)
PERCPU_SECTION(0x100) PERCPU_SECTION(0x100)
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
...@@ -219,6 +222,8 @@ SECTIONS ...@@ -219,6 +222,8 @@ SECTIONS
QUAD(init_mm) QUAD(init_mm)
QUAD(swapper_pg_dir) QUAD(swapper_pg_dir)
QUAD(invalid_pg_dir) QUAD(invalid_pg_dir)
QUAD(__alt_instructions)
QUAD(__alt_instructions_end)
#ifdef CONFIG_KASAN #ifdef CONFIG_KASAN
QUAD(kasan_early_shadow_page) QUAD(kasan_early_shadow_page)
QUAD(kasan_early_shadow_pte) QUAD(kasan_early_shadow_pte)
......
...@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock) ...@@ -75,7 +75,7 @@ static inline int arch_load_niai4(int *lock)
int owner; int owner;
asm_inline volatile( asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
" l %0,%1\n" " l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory"); : "=d" (owner) : "Q" (*lock) : "memory");
return owner; return owner;
...@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new) ...@@ -86,7 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
int expected = old; int expected = old;
asm_inline volatile( asm_inline volatile(
ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
" cs %0,%3,%1\n" " cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock) : "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock) : "0" (old), "d" (new), "Q" (*lock)
......
...@@ -36,6 +36,16 @@ enum address_markers_idx { ...@@ -36,6 +36,16 @@ enum address_markers_idx {
VMEMMAP_END_NR, VMEMMAP_END_NR,
VMALLOC_NR, VMALLOC_NR,
VMALLOC_END_NR, VMALLOC_END_NR,
#ifdef CONFIG_KMSAN
KMSAN_VMALLOC_SHADOW_START_NR,
KMSAN_VMALLOC_SHADOW_END_NR,
KMSAN_VMALLOC_ORIGIN_START_NR,
KMSAN_VMALLOC_ORIGIN_END_NR,
KMSAN_MODULES_SHADOW_START_NR,
KMSAN_MODULES_SHADOW_END_NR,
KMSAN_MODULES_ORIGIN_START_NR,
KMSAN_MODULES_ORIGIN_END_NR,
#endif
MODULES_NR, MODULES_NR,
MODULES_END_NR, MODULES_END_NR,
ABS_LOWCORE_NR, ABS_LOWCORE_NR,
...@@ -65,6 +75,16 @@ static struct addr_marker address_markers[] = { ...@@ -65,6 +75,16 @@ static struct addr_marker address_markers[] = {
[VMEMMAP_END_NR] = {0, "vmemmap Area End"}, [VMEMMAP_END_NR] = {0, "vmemmap Area End"},
[VMALLOC_NR] = {0, "vmalloc Area Start"}, [VMALLOC_NR] = {0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, "vmalloc Area End"}, [VMALLOC_END_NR] = {0, "vmalloc Area End"},
#ifdef CONFIG_KMSAN
[KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"},
[KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"},
[KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"},
[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"},
[KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"},
[KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"},
[KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"},
[KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"},
#endif
[MODULES_NR] = {0, "Modules Area Start"}, [MODULES_NR] = {0, "Modules Area Start"},
[MODULES_END_NR] = {0, "Modules Area End"}, [MODULES_END_NR] = {0, "Modules Area End"},
[ABS_LOWCORE_NR] = {0, "Lowcore Area Start"}, [ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
...@@ -306,6 +326,16 @@ static int pt_dump_init(void) ...@@ -306,6 +326,16 @@ static int pt_dump_init(void)
#ifdef CONFIG_KFENCE #ifdef CONFIG_KFENCE
address_markers[KFENCE_START_NR].start_address = kfence_start; address_markers[KFENCE_START_NR].start_address = kfence_start;
address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE; address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
#endif
#ifdef CONFIG_KMSAN
address_markers[KMSAN_VMALLOC_SHADOW_START_NR].start_address = KMSAN_VMALLOC_SHADOW_START;
address_markers[KMSAN_VMALLOC_SHADOW_END_NR].start_address = KMSAN_VMALLOC_SHADOW_END;
address_markers[KMSAN_VMALLOC_ORIGIN_START_NR].start_address = KMSAN_VMALLOC_ORIGIN_START;
address_markers[KMSAN_VMALLOC_ORIGIN_END_NR].start_address = KMSAN_VMALLOC_ORIGIN_END;
address_markers[KMSAN_MODULES_SHADOW_START_NR].start_address = KMSAN_MODULES_SHADOW_START;
address_markers[KMSAN_MODULES_SHADOW_END_NR].start_address = KMSAN_MODULES_SHADOW_END;
address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
#endif #endif
sort_address_markers(); sort_address_markers();
#ifdef CONFIG_PTDUMP_DEBUGFS #ifdef CONFIG_PTDUMP_DEBUGFS
......
...@@ -48,7 +48,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz ...@@ -48,7 +48,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
} }
/* /*
* s390_kernel_write - write to kernel memory bypassing DAT * __s390_kernel_write - write to kernel memory bypassing DAT
* @dst: destination address * @dst: destination address
* @src: source address * @src: source address
* @size: number of bytes to copy * @size: number of bytes to copy
...@@ -61,7 +61,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz ...@@ -61,7 +61,7 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
*/ */
static DEFINE_SPINLOCK(s390_kernel_write_lock); static DEFINE_SPINLOCK(s390_kernel_write_lock);
notrace void *s390_kernel_write(void *dst, const void *src, size_t size) notrace void *__s390_kernel_write(void *dst, const void *src, size_t size)
{ {
void *tmp = dst; void *tmp = dst;
unsigned long flags; unsigned long flags;
......
...@@ -268,33 +268,20 @@ static void zpci_floating_irq_handler(struct airq_struct *airq, ...@@ -268,33 +268,20 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
} }
} }
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs,
unsigned long *bit)
{ {
struct zpci_dev *zdev = to_zpci(pdev);
unsigned int hwirq, msi_vecs, cpu;
unsigned long bit;
struct msi_desc *msi;
struct msi_msg msg;
int cpu_addr;
int rc, irq;
zdev->aisb = -1UL;
zdev->msi_first_bit = -1U;
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
if (irq_delivery == DIRECTED) { if (irq_delivery == DIRECTED) {
/* Allocate cpu vector bits */ /* Allocate cpu vector bits */
bit = airq_iv_alloc(zpci_ibv[0], msi_vecs); *bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
if (bit == -1UL) if (*bit == -1UL)
return -EIO; return -EIO;
} else { } else {
/* Allocate adapter summary indicator bit */ /* Allocate adapter summary indicator bit */
bit = airq_iv_alloc_bit(zpci_sbv); *bit = airq_iv_alloc_bit(zpci_sbv);
if (bit == -1UL) if (*bit == -1UL)
return -EIO; return -EIO;
zdev->aisb = bit; zdev->aisb = *bit;
/* Create adapter interrupt vector */ /* Create adapter interrupt vector */
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL); zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
...@@ -302,27 +289,66 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) ...@@ -302,27 +289,66 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -ENOMEM; return -ENOMEM;
/* Wire up shortcut pointer */ /* Wire up shortcut pointer */
zpci_ibv[bit] = zdev->aibv; zpci_ibv[*bit] = zdev->aibv;
/* Each function has its own interrupt vector */ /* Each function has its own interrupt vector */
bit = 0; *bit = 0;
} }
return 0;
}
/* Request MSI interrupts */ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu;
struct zpci_dev *zdev = to_zpci(pdev);
struct msi_desc *msi;
struct msi_msg msg;
unsigned long bit;
int cpu_addr;
int rc, irq;
zdev->aisb = -1UL;
zdev->msi_first_bit = -1U;
msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
if (msi_vecs < nvec) {
pr_info("%s requested %d irqs, allocate system limit of %d",
pci_name(pdev), nvec, zdev->max_msi);
}
rc = __alloc_airq(zdev, msi_vecs, &bit);
if (rc < 0)
return rc;
/*
* Request MSI interrupts:
* When using MSI, nvec_used interrupt sources and their irq
* descriptors are controlled through one msi descriptor.
* Thus the outer loop over msi descriptors shall run only once,
* while two inner loops iterate over the interrupt vectors.
* When using MSI-X, each interrupt vector/irq descriptor
* is bound to exactly one msi descriptor (nvec_used is one).
* So the inner loops are executed once, while the outer iterates
* over the MSI-X descriptors.
*/
hwirq = bit; hwirq = bit;
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) { msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
rc = -EIO;
if (hwirq - bit >= msi_vecs) if (hwirq - bit >= msi_vecs)
break; break;
irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used);
(irq_delivery == DIRECTED) ? irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE,
msi->affinity : NULL); (irq_delivery == DIRECTED) ?
msi->affinity : NULL);
if (irq < 0) if (irq < 0)
return -ENOMEM; return -ENOMEM;
rc = irq_set_msi_desc(irq, msi);
if (rc) for (i = 0; i < irqs_per_msi; i++) {
return rc; rc = irq_set_msi_desc_off(irq, i, msi);
irq_set_chip_and_handler(irq, &zpci_irq_chip, if (rc)
handle_percpu_irq); return rc;
irq_set_chip_and_handler(irq + i, &zpci_irq_chip,
handle_percpu_irq);
}
msg.data = hwirq - bit; msg.data = hwirq - bit;
if (irq_delivery == DIRECTED) { if (irq_delivery == DIRECTED) {
if (msi->affinity) if (msi->affinity)
...@@ -335,31 +361,35 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) ...@@ -335,31 +361,35 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
msg.address_lo |= (cpu_addr << 8); msg.address_lo |= (cpu_addr << 8);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
airq_iv_set_data(zpci_ibv[cpu], hwirq, irq); for (i = 0; i < irqs_per_msi; i++)
airq_iv_set_data(zpci_ibv[cpu],
hwirq + i, irq + i);
} }
} else { } else {
msg.address_lo = zdev->msi_addr & 0xffffffff; msg.address_lo = zdev->msi_addr & 0xffffffff;
airq_iv_set_data(zdev->aibv, hwirq, irq); for (i = 0; i < irqs_per_msi; i++)
airq_iv_set_data(zdev->aibv, hwirq + i, irq + i);
} }
msg.address_hi = zdev->msi_addr >> 32; msg.address_hi = zdev->msi_addr >> 32;
pci_write_msi_msg(irq, &msg); pci_write_msi_msg(irq, &msg);
hwirq++; hwirq += irqs_per_msi;
} }
zdev->msi_first_bit = bit; zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = msi_vecs; zdev->msi_nr_irqs = hwirq - bit;
rc = zpci_set_irq(zdev); rc = zpci_set_irq(zdev);
if (rc) if (rc)
return rc; return rc;
return (msi_vecs == nvec) ? 0 : msi_vecs; return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs;
} }
void arch_teardown_msi_irqs(struct pci_dev *pdev) void arch_teardown_msi_irqs(struct pci_dev *pdev)
{ {
struct zpci_dev *zdev = to_zpci(pdev); struct zpci_dev *zdev = to_zpci(pdev);
struct msi_desc *msi; struct msi_desc *msi;
unsigned int i;
int rc; int rc;
/* Disable interrupts */ /* Disable interrupts */
...@@ -369,8 +399,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) ...@@ -369,8 +399,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
/* Release MSI interrupts */ /* Release MSI interrupts */
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) { msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_set_msi_desc(msi->irq, NULL); for (i = 0; i < msi->nvec_used; i++) {
irq_free_desc(msi->irq); irq_set_msi_desc(msi->irq + i, NULL);
irq_free_desc(msi->irq + i);
}
msi->msg.address_lo = 0; msi->msg.address_lo = 0;
msi->msg.address_hi = 0; msi->msg.address_hi = 0;
msi->msg.data = 0; msi->msg.data = 0;
......
...@@ -96,7 +96,7 @@ config SCLP_OFB ...@@ -96,7 +96,7 @@ config SCLP_OFB
config S390_UV_UAPI config S390_UV_UAPI
def_tristate m def_tristate m
prompt "Ultravisor userspace API" prompt "Ultravisor userspace API"
depends on S390 && (KVM || PROTECTED_VIRTUALIZATION_GUEST) depends on S390
help help
Selecting exposes parts of the UV interface to userspace Selecting exposes parts of the UV interface to userspace
by providing a misc character device at /dev/uv. by providing a misc character device at /dev/uv.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment