Commit e5a489ab authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'powerpc-4.12-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "The headliner is a fix for FP/VMX register corruption when using
  transactional memory, and a new selftest to go with it.

  Then there's the virt_addr_valid() fix, currently HARDENED_USERCOPY
  is tripping on that causing some machines to crash.

  A few other fairly minor fixes for long tail things, and a couple of
  fixes for code we just merged.

  Thanks to: Breno Leitao, Gautham Shenoy, Michael Neuling, Naveen Rao,
  Nicholas Piggin, Paul Mackerras"

* tag 'powerpc-4.12-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm: Fix virt_addr_valid() etc. on 64-bit hash
  powerpc/mm: Fix crash in page table dump with huge pages
  powerpc/kprobes: Fix handling of instruction emulation on probe re-entry
  powerpc/powernv: Set NAPSTATELOST after recovering paca on P9 DD1
  selftests/powerpc: Test TM and VMX register state
  powerpc/tm: Fix FP and VMX register corruption
  powerpc/modules: If mprofile-kernel is enabled add it to vermagic
parents 8b4822de e41e53cd
......@@ -14,6 +14,10 @@
#include <asm-generic/module.h>
#ifdef CC_USING_MPROFILE_KERNEL
#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
#endif
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.
......
......@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#ifdef CONFIG_PPC_BOOK3S_64
/*
* On hash the vmalloc and other regions alias to the kernel region when passed
* through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
* return true for some vmalloc addresses, which is incorrect. So explicitly
* check that the address is in the kernel region.
*/
#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
pfn_valid(virt_to_pfn(kaddr)))
#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't
......
......@@ -416,7 +416,7 @@ power9_dd1_recover_paca:
* which needs to be restored from the stack.
*/
li r3, 1
stb r0,PACA_NAPSTATELOST(r13)
stb r3,PACA_NAPSTATELOST(r13)
blr
/*
......
......@@ -305,16 +305,17 @@ int kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
if (ret > 0) {
restore_previous_kprobe(kcb);
preempt_enable_no_resched();
return 1;
}
}
prepare_singlestep(p, regs);
return 1;
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
......
......@@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
/*
* If we are in a transaction and FP is off then we can't have
* used FP inside that transaction. Hence the checkpointed
* state is the same as the live state. We need to copy the
* live state to the checkpointed state so that when the
* transaction is restored, the checkpointed state is correct
* and the aborted transaction sees the correct state. We use
* ckpt_regs.msr here as that's what tm_reclaim will use to
* determine if it's going to write the checkpointed state or
* not. So either this will write the checkpointed registers,
* or reclaim will. Similarly for VMX.
*/
if ((thr->ckpt_regs.msr & MSR_FP) == 0)
memcpy(&thr->ckfp_state, &thr->fp_state,
sizeof(struct thread_fp_state));
if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
memcpy(&thr->ckvr_state, &thr->vr_state,
sizeof(struct thread_vr_state));
giveup_all(container_of(thr, struct task_struct, thread));
tm_reclaim(thr, thr->ckpt_regs.msr, cause);
......
......@@ -16,6 +16,7 @@
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
......@@ -391,7 +392,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
if (!pmd_none(*pmd))
if (!pmd_none(*pmd) && !pmd_huge(*pmd))
/* pmd exists */
walk_pte(st, pmd, addr);
else
......@@ -407,7 +408,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
if (!pud_none(*pud))
if (!pud_none(*pud) && !pud_huge(*pud))
/* pud exists */
walk_pmd(st, pud, addr);
else
......@@ -427,7 +428,7 @@ static void walk_pagetables(struct pg_state *st)
*/
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = KERN_VIRT_START + i * PGDIR_SIZE;
if (!pgd_none(*pgd))
if (!pgd_none(*pgd) && !pgd_huge(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
else
......
......@@ -11,3 +11,4 @@ tm-signal-context-chk-fpu
tm-signal-context-chk-gpr
tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
tm-vmx-unavail
......@@ -2,7 +2,8 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
tm-signal-context-chk-vmx tm-signal-context-chk-vsx
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS)
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail \
$(SIGNAL_CONTEXT_CHK_TESTS)
include ../../lib.mk
......@@ -13,6 +14,7 @@ CFLAGS += -mhtm
$(OUTPUT)/tm-syscall: tm-syscall-asm.S
$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
$(OUTPUT)/tm-tmspr: CFLAGS += -pthread
$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
......
/*
* Copyright 2017, Michael Neuling, IBM Corp.
* Licensed under GPLv2.
* Original: Breno Leitao <brenohl@br.ibm.com> &
* Gustavo Bueno Romero <gromero@br.ibm.com>
* Edited: Michael Neuling
*
* Force VMX unavailable during a transaction and see if it corrupts
* the checkpointed VMX register state after the abort.
*/
#include <inttypes.h>
#include <htmintrin.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>
#include <pthread.h>
#include "tm.h"
#include "utils.h"
int passed;
void *worker(void *unused)
{
__int128 vmx0;
uint64_t texasr;
asm goto (
"li 3, 1;" /* Stick non-zero value in VMX0 */
"std 3, 0(%[vmx0_ptr]);"
"lvx 0, 0, %[vmx0_ptr];"
/* Wait here a bit so we get scheduled out 255 times */
"lis 3, 0x3fff;"
"1: ;"
"addi 3, 3, -1;"
"cmpdi 3, 0;"
"bne 1b;"
/* Kernel will hopefully turn VMX off now */
"tbegin. ;"
"beq failure;"
/* Cause VMX unavail. Any VMX instruction */
"vaddcuw 0,0,0;"
"tend. ;"
"b %l[success];"
/* Check VMX0 sanity after abort */
"failure: ;"
"lvx 1, 0, %[vmx0_ptr];"
"vcmpequb. 2, 0, 1;"
"bc 4, 24, %l[value_mismatch];"
"b %l[value_match];"
:
: [vmx0_ptr] "r"(&vmx0)
: "r3"
: success, value_match, value_mismatch
);
/* HTM aborted and VMX0 is corrupted */
value_mismatch:
texasr = __builtin_get_texasr();
printf("\n\n==============\n\n");
printf("Failure with error: %lx\n", _TEXASR_FAILURE_CODE(texasr));
printf("Summary error : %lx\n", _TEXASR_FAILURE_SUMMARY(texasr));
printf("TFIAR exact : %lx\n\n", _TEXASR_TFIAR_EXACT(texasr));
passed = 0;
return NULL;
/* HTM aborted but VMX0 is correct */
value_match:
// printf("!");
return NULL;
success:
// printf(".");
return NULL;
}
int tm_vmx_unavail_test()
{
int threads;
pthread_t *thread;
SKIP_IF(!have_htm());
passed = 1;
threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
thread = malloc(sizeof(pthread_t)*threads);
if (!thread)
return EXIT_FAILURE;
for (uint64_t i = 0; i < threads; i++)
pthread_create(&thread[i], NULL, &worker, NULL);
for (uint64_t i = 0; i < threads; i++)
pthread_join(thread[i], NULL);
free(thread);
return passed ? EXIT_SUCCESS : EXIT_FAILURE;
}
int main(int argc, char **argv)
{
return test_harness(tm_vmx_unavail_test, "tm_vmx_unavail_test");
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment