Commit 81d55984 authored by Linus Torvalds

Merge tag 's390-5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Add support for KASAN_VMALLOC feature.

 - Remove the last user of the problematic diag 0x44 call.

 - Adjust the sampling interval and avoid a sample-data-block overflow
   condition under pressure in the perf code.

 - Prefer EOPNOTSUPP over ENOTSUPP, plus comment fixes.

* tag 's390-5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/kasan: add KASAN_VMALLOC support
  s390: remove last diag 0x44 caller
  s390/uv: use EOPNOTSUPP instead of ENOTSUPP
  s390/cpum_sf: Avoid SBD overflow condition in irq handler
  s390/cpum_sf: Adjust sampling interval to avoid hitting sample limits
  s390/test_unwind: fix spelling mistake "reqister" -> "register"
  s390/spinlock: remove confusing comment in arch_spin_lock_wait
parents f791ede3 3e39ce26
@@ -124,6 +124,7 @@ config S390
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN
+	select HAVE_ARCH_KASAN_VMALLOC
 	select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_SOFT_DIRTY
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#define MACHINE_FLAG_DIAG9C BIT(3) #define MACHINE_FLAG_DIAG9C BIT(3)
#define MACHINE_FLAG_ESOP BIT(4) #define MACHINE_FLAG_ESOP BIT(4)
#define MACHINE_FLAG_IDTE BIT(5) #define MACHINE_FLAG_IDTE BIT(5)
#define MACHINE_FLAG_DIAG44 BIT(6)
#define MACHINE_FLAG_EDAT1 BIT(7) #define MACHINE_FLAG_EDAT1 BIT(7)
#define MACHINE_FLAG_EDAT2 BIT(8) #define MACHINE_FLAG_EDAT2 BIT(8)
#define MACHINE_FLAG_TOPOLOGY BIT(10) #define MACHINE_FLAG_TOPOLOGY BIT(10)
...@@ -94,7 +93,6 @@ extern unsigned long __swsusp_reset_dma; ...@@ -94,7 +93,6 @@ extern unsigned long __swsusp_reset_dma;
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) #define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP) #define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
......
@@ -86,7 +86,7 @@ static inline int share(unsigned long addr, u16 cmd)
 	};
 
 	if (!is_prot_virt_guest())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	/*
 	 * Sharing is page wise, if we encounter addresses that are
 	 * not page aligned, we assume something went wrong. If
......
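Background for this one-liner: ENOTSUPP (524) is a kernel-internal errno with no userspace (UAPI) definition, so if it leaks out of a syscall, libc cannot name it; EOPNOTSUPP (95) is a proper UAPI value. A minimal userspace illustration of the difference (written for this note, not part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* EOPNOTSUPP is a UAPI errno, so strerror() can name it. */
		printf("%d: %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
		/* 524 is the kernel-internal ENOTSUPP value; glibc only
		 * reports "Unknown error 524", which is what callers saw
		 * when the old code leaked it. */
		printf("%d: %s\n", 524, strerror(524));
		return 0;
	}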
@@ -204,21 +204,6 @@ static __init void detect_diag9c(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
 }
 
-static __init void detect_diag44(void)
-{
-	int rc;
-
-	diag_stat_inc(DIAG_STAT_X044);
-	asm volatile(
-		"	diag	0,0,0x44\n"
-		"0:	la	%0,0\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
-	if (!rc)
-		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-}
-
 static __init void detect_machine_facilities(void)
 {
 	if (test_facility(8)) {
@@ -331,7 +316,6 @@ void __init startup_init(void)
 	setup_arch_string();
 	setup_boot_command_line();
 	detect_diag9c();
-	detect_diag44();
 	detect_machine_facilities();
 	save_vector_registers();
 	setup_topology();
......
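For context on what is being deleted: detect_diag44() is an instance of the common s390 instruction-probe idiom, where a possibly-unsupported instruction is executed and an exception-table entry converts the resulting program check into an error code instead of an oops. The removed function, re-quoted with explanatory annotations (the comments are mine, not in the original):

	static __init void detect_diag44(void)
	{
		int rc;

		diag_stat_inc(DIAG_STAT_X044);
		asm volatile(
			"	diag	0,0,0x44\n"	/* may raise a program check */
			"0:	la	%0,0\n"		/* only reached on success: rc = 0 */
			"1:\n"
			/* Fixup keyed at 0: (s390 program checks report the
			 * address past the faulting instruction), so a fault
			 * in the diag resumes at 1:, skipping the la. */
			EX_TABLE(0b,1b)
			: "=d" (rc)
			: "0" (-EOPNOTSUPP)	/* rc preset to the failure value */
			: "cc");
		if (!rc)
			S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
	}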
@@ -1303,18 +1303,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 		 */
 		if (flush_all && done)
 			break;
-
-		/* If an event overflow happened, discard samples by
-		 * processing any remaining sample-data-blocks.
-		 */
-		if (event_overflow)
-			flush_all = 1;
 	}
 
 	/* Account sample overflows in the event hardware structure */
 	if (sampl_overflow)
 		OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
 						 sampl_overflow, 1 + num_sdb);
+
+	/* Perf_event_overflow() and perf_event_account_interrupt() limit
+	 * the interrupt rate to an upper limit. Roughly 1000 samples per
+	 * task tick.
+	 * Hitting this limit results in a large number
+	 * of throttled REF_REPORT_THROTTLE entries and the samples
+	 * are dropped.
+	 * Slightly increase the interval to avoid hitting this limit.
+	 */
+	if (event_overflow) {
+		SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+		debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+				    __func__,
+				    DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+	}
+
 	if (sampl_overflow || event_overflow)
 		debug_sprintf_event(sfdbg, 4, "%s: "
 				    "overflows: sample %llu event %llu"
......
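The arithmetic behind the new rate adjustment is a simple multiplicative back-off: each detected event overflow grows the sampling interval by DIV_ROUND_UP(rate, 10), i.e. roughly 10%, so repeated overflows increase the interval geometrically until the throttle limit is no longer hit. A standalone sketch of that step (made-up starting value, not driver code):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long rate = 4127;	/* hypothetical sampling interval */
		int i;

		/* Same step as the hunk above: grow by ~10% per overflow,
		 * rounding up so even tiny intervals keep increasing. */
		for (i = 0; i < 5; i++) {
			rate += DIV_ROUND_UP(rate, 10);
			printf("after overflow %d: %lu\n", i + 1, rate);
		}
		return 0;
	}

After five consecutive overflows the interval has grown by about 61% (1.1^5), which quickly pushes the sample rate back under the roughly 1000 samples per task tick limit mentioned in the comment.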
@@ -413,14 +413,11 @@ EXPORT_SYMBOL(arch_vcpu_is_preempted);
 
 void smp_yield_cpu(int cpu)
 {
-	if (MACHINE_HAS_DIAG9C) {
-		diag_stat_inc_norecursion(DIAG_STAT_X09C);
-		asm volatile("diag %0,0,0x9c"
-			     : : "d" (pcpu_devices[cpu].address));
-	} else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
-		diag_stat_inc_norecursion(DIAG_STAT_X044);
-		asm volatile("diag 0,0,0x44");
-	}
+	if (!MACHINE_HAS_DIAG9C)
+		return;
+	diag_stat_inc_norecursion(DIAG_STAT_X09C);
+	asm volatile("diag %0,0,0x9c"
+		     : : "d" (pcpu_devices[cpu].address));
 }
 
 /*
......
@@ -242,7 +242,6 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-	/* Use classic spinlocks + niai if the steal time is >= 10% */
 	if (test_cpu_flag(CIF_DEDICATED_CPU))
 		arch_spin_lock_queued(lp);
 	else
......
@@ -238,7 +238,7 @@ static int test_unwind_irq(struct unwindme *u)
 {
 	preempt_disable();
 	if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
-		pr_info("Couldn't reqister external interrupt handler");
+		pr_info("Couldn't register external interrupt handler");
 		return -1;
 	}
 	u->task = current;
......
@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void)
 enum populate_mode {
 	POPULATE_ONE2ONE,
 	POPULATE_MAP,
-	POPULATE_ZERO_SHADOW
+	POPULATE_ZERO_SHADOW,
+	POPULATE_SHALLOW
 };
 static void __init kasan_early_vmemmap_populate(unsigned long address,
 						unsigned long end,
@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
 			pgd_populate(&init_mm, pg_dir, p4_dir);
 		}
 
+		if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+		    mode == POPULATE_SHALLOW) {
+			address = (address + P4D_SIZE) & P4D_MASK;
+			continue;
+		}
+
 		p4_dir = p4d_offset(pg_dir, address);
 		if (p4d_none(*p4_dir)) {
 			if (mode == POPULATE_ZERO_SHADOW &&
@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
 			p4d_populate(&init_mm, p4_dir, pu_dir);
 		}
 
+		if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+		    mode == POPULATE_SHALLOW) {
+			address = (address + PUD_SIZE) & PUD_MASK;
+			continue;
+		}
+
 		pu_dir = pud_offset(p4_dir, address);
 		if (pud_none(*pu_dir)) {
 			if (mode == POPULATE_ZERO_SHADOW &&
@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
 				page = kasan_early_shadow_page;
 				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
 				break;
+			case POPULATE_SHALLOW:
+				/* should never happen */
+				break;
 			}
 		}
 		address += PAGE_SIZE;
@@ -313,22 +329,50 @@ void __init kasan_early_init(void)
 	init_mm.pgd = early_pg_dir;
 	/*
 	 * Current memory layout:
 	 * +- 0 -------------+     +- shadow start -+
 	 * | 1:1 ram mapping |    /| 1/8 ram        |
-	 * +- end of ram ----+   / +----------------+
-	 * | ... gap ...     |/    |      kasan     |
-	 * +- shadow start --+     |      zero      |
-	 * | 1/8 addr space  |     |      page      |
-	 * +- shadow end    -+     |      mapping   |
-	 * | ... gap ...     |\    |   (untracked)  |
-	 * +- modules vaddr -+   \ +----------------+
-	 * | 2Gb             |    \|     unmapped   | allocated per module
-	 * +-----------------+     +- shadow end ---+
+	 * |                 |   / |                |
+	 * +- end of ram ----+  /  +----------------+
+	 * | ... gap ...     | /   |                |
+	 * |                 |/    |      kasan     |
+	 * +- shadow start --+     |      zero      |
+	 * | 1/8 addr space  |     |      page      |
+	 * +- shadow end    -+     |      mapping   |
+	 * | ... gap ...     |\    |   (untracked)  |
+	 * +- vmalloc area  -+  \  |                |
+	 * | vmalloc_size    |   \ |                |
+	 * +- modules vaddr -+    \+----------------+
+	 * | 2Gb             |     \|    unmapped   | allocated per module
+	 * +-----------------+      +- shadow end --+
+	 *
+	 * Current memory layout (KASAN_VMALLOC):
+	 * +- 0 -------------+     +- shadow start -+
+	 * | 1:1 ram mapping |    /| 1/8 ram        |
+	 * |                 |   / |                |
+	 * +- end of ram ----+  /  +----------------+
+	 * | ... gap ...     | /   |      kasan     |
+	 * |                 |/    |      zero      |
+	 * +- shadow start --+     |      page      |
+	 * | 1/8 addr space  |     |      mapping   |
+	 * +- shadow end    -+     |   (untracked)  |
+	 * | ... gap ...     |\    |                |
+	 * +- vmalloc area  -+  \  +- vmalloc area -+
+	 * | vmalloc_size    |   \ |shallow populate|
+	 * +- modules vaddr -+    \+- modules area -+
+	 * | 2Gb             |     \|shallow populate|
+	 * +-----------------+      +- shadow end --+
 	 */
 	/* populate kasan shadow (for identity mapping and zero page mapping) */
 	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
 	if (IS_ENABLED(CONFIG_MODULES))
 		untracked_mem_end = vmax - MODULES_LEN;
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
+		/* shallowly populate kasan shadow for vmalloc and modules */
+		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
+					     __sha(vmax), POPULATE_SHALLOW);
+	}
+	/* populate kasan shadow for untracked memory */
 	kasan_early_vmemmap_populate(__sha(max_physmem_end),
 				     __sha(untracked_mem_end),
 				     POPULATE_ZERO_SHADOW);
......
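The address step used by the two POPULATE_SHALLOW branches, (address + SIZE) & MASK, advances to the start of the next p4d- or pud-sized region whether or not address is currently aligned, which is what lets the loop skip the lower page-table levels for the whole region. A standalone sketch with a made-up region size (the real per-entry sizes here are much larger, e.g. 2 GiB for PUD_SIZE on s390):

	#include <stdio.h>

	/* Made-up region size for illustration only. */
	#define REGION_SIZE	0x1000UL
	#define REGION_MASK	(~(REGION_SIZE - 1))

	int main(void)
	{
		/* Same step as the POPULATE_SHALLOW hunks above: jump to
		 * the start of the next region boundary. */
		unsigned long addrs[] = { 0x0, 0x1, 0xfff, 0x1000 };
		int i;

		for (i = 0; i < 4; i++)
			printf("%#07lx -> %#07lx\n", addrs[i],
			       (addrs[i] + REGION_SIZE) & REGION_MASK);
		return 0;
	}

For already-aligned addresses (0x0, 0x1000) the step moves one full region forward; for unaligned ones it rounds up to the next boundary, so the loop never revisits a region it has shallowly populated.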