Commit 5eb7cfb3 authored by Michael Ellerman's avatar Michael Ellerman

selftests/powerpc: Add a test of bad (out-of-range) accesses

Userspace isn't allowed to access certain address ranges, make sure we
actually test that to at least some degree.

This would have caught the recent bug where the SLB fault handler was
incorrectly called on an out-of-range access when using the Radix MMU.
It also would have caught the bug we had in get_region_id() where we
were inserting SLB entries for bad addresses.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190520102051.12103-1-mpe@ellerman.id.au
parent 1a3ec143
...@@ -5,3 +5,4 @@ prot_sao ...@@ -5,3 +5,4 @@ prot_sao
segv_errors segv_errors
wild_bctr wild_bctr
large_vm_fork_separation large_vm_fork_separation
bad_accesses
...@@ -3,7 +3,7 @@ noarg: ...@@ -3,7 +3,7 @@ noarg:
$(MAKE) -C ../ $(MAKE) -C ../
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
large_vm_fork_separation large_vm_fork_separation bad_accesses
TEST_GEN_PROGS_EXTENDED := tlbie_test TEST_GEN_PROGS_EXTENDED := tlbie_test
TEST_GEN_FILES := tempfile TEST_GEN_FILES := tempfile
...@@ -16,6 +16,7 @@ $(OUTPUT)/prot_sao: ../utils.c ...@@ -16,6 +16,7 @@ $(OUTPUT)/prot_sao: ../utils.c
$(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/wild_bctr: CFLAGS += -m64
$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
$(OUTPUT)/bad_accesses: CFLAGS += -m64
$(OUTPUT)/tempfile: $(OUTPUT)/tempfile:
dd if=/dev/zero of=$@ bs=64k count=1 dd if=/dev/zero of=$@ bs=64k count=1
......
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019, Michael Ellerman, IBM Corp.
//
// Test that out-of-bounds reads/writes behave as expected.
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "utils.h"
// Old distros (Ubuntu 16.04 at least) don't define this
#ifndef SEGV_BNDERR
#define SEGV_BNDERR 3
#endif
// 64-bit kernel is always here
#define PAGE_OFFSET (0xcul << 60)
// One past the end of the valid kernel virtual address range; computed in
// test() from the MMU type and page size.
static unsigned long kernel_virt_end;
// Written by segv_handler() on each fault, read back in bad_access();
// volatile because they are modified from a signal handler and inspected
// after siglongjmp().
static volatile int fault_code;
static volatile unsigned long fault_addr;
// Jump target allowing segv_handler() to return control to bad_access().
static jmp_buf setjmp_env;
// SIGSEGV handler: capture the faulting address and si_code into the
// file-scope volatiles, then jump back to the sigsetjmp() point in
// bad_access() so the test can examine them.
static void segv_handler(int n, siginfo_t *info, void *ctxt_v)
{
	const siginfo_t *si = info;

	fault_addr = (unsigned long)si->si_addr;
	fault_code = si->si_code;

	siglongjmp(setjmp_env, 1);
}
// Perform one deliberately-bad access (read or write) at p and verify the
// resulting SIGSEGV carries a sensible si_code.
//
// Returns 0 when the access faulted as expected, non-zero when no fault
// occurred or the si_code was wrong.
int bad_access(char *p, bool write)
{
	char c;

	fault_code = 0;
	fault_addr = 0;

	if (sigsetjmp(setjmp_env, 1) != 0) {
		// We faulted and segv_handler() jumped back here.
		//
		// If we see MAPERR that means we took a page fault rather
		// than an SLB miss. We only expect to take page faults for
		// addresses within the valid kernel range.
		FAIL_IF(fault_code == SEGV_MAPERR &&
			(fault_addr < PAGE_OFFSET || fault_addr >= kernel_virt_end));
		FAIL_IF(fault_code != SEGV_MAPERR && fault_code != SEGV_BNDERR);
		return 0;
	}

	// First pass through sigsetjmp(): do the access itself.
	if (write)
		*p = 1;
	else
		c = *p;

	// Reaching here means the access did NOT fault - that's a failure.
	printf("Bad - no SEGV! (%c)\n", c);
	return 1;
}
// Determine whether the kernel is running with the Hash or Radix MMU by
// parsing the "MMU ... : Hash/Radix" line of /proc/cpuinfo.
//
// On success *using_hash is set accordingly and 0 is returned. Returns
// non-zero if /proc/cpuinfo can't be opened or contains no MMU line.
static int using_hash_mmu(bool *using_hash)
{
	char line[128];
	FILE *f;
	int rc;

	f = fopen("/proc/cpuinfo", "r");
	FAIL_IF(!f);

	rc = 0;
	while (fgets(line, sizeof(line), f) != NULL) {
		// Match on the "MMU" prefix and the ": Hash"/": Radix" value
		// rather than an exact string, so the amount of whitespace
		// between the field name and the colon doesn't matter.
		if (strncmp(line, "MMU", 3) != 0)
			continue;

		if (strstr(line, ": Hash")) {
			*using_hash = true;
			goto out;
		}

		if (strstr(line, ": Radix")) {
			*using_hash = false;
			goto out;
		}
	}

	// No MMU line found - caller can't proceed.
	rc = -1;
out:
	fclose(f);
	return rc;
}
// Walk a range of out-of-bounds / kernel addresses in every region above
// userspace and check that both reads and writes to them fault with an
// acceptable si_code (see bad_access()).
static int test(void)
{
unsigned long i, j, addr, region_shift, page_shift, page_size;
struct sigaction sig;
bool hash_mmu;
// Install segv_handler() with SA_SIGINFO so it receives si_code/si_addr.
sig = (struct sigaction) {
.sa_sigaction = segv_handler,
.sa_flags = SA_SIGINFO,
};
FAIL_IF(sigaction(SIGSEGV, &sig, NULL) != 0);
FAIL_IF(using_hash_mmu(&hash_mmu));
// Only 64K and 4K pages are expected on powerpc64.
page_size = sysconf(_SC_PAGESIZE);
if (page_size == (64 * 1024))
page_shift = 16;
else
page_shift = 12;
// Pick the kernel virtual-address layout: 64K hash and all radix configs
// use 512T regions; 4K hash uses 64T regions.
if (page_size == (64 * 1024) || !hash_mmu) {
region_shift = 52;
// 7 x 512T regions: 4 for the kernel linear map, plus vmalloc, io, vmemmap
kernel_virt_end = PAGE_OFFSET + (7 * (512ul << 40));
} else if (page_size == (4 * 1024) && hash_mmu) {
region_shift = 46;
// 7 x 64T regions: 4 for the kernel linear map, plus vmalloc, io, vmemmap
kernel_virt_end = PAGE_OFFSET + (7 * (64ul << 40));
} else
// Unrecognised page size / MMU combination.
FAIL_IF(true);
printf("Using %s MMU, PAGE_SIZE = %dKB start address 0x%016lx\n",
hash_mmu ? "hash" : "radix",
(1 << page_shift) >> 10,
1ul << region_shift);
// This generates access patterns like:
// 0x0010000000000000
// 0x0010000000010000
// 0x0010000000020000
// ...
// 0x0014000000000000
// 0x0018000000000000
// 0x0020000000000000
// 0x0020000000010000
// 0x0020000000020000
// ...
// 0xf400000000000000
// 0xf800000000000000
for (i = 1; i <= ((0xful << 60) >> region_shift); i++) {
for (j = page_shift - 1; j < 60; j++) {
unsigned long base, delta;
base = i << region_shift;
delta = 1ul << j;
// Once the offset exceeds the region base the address would
// alias into a later region - move on to the next region.
if (delta >= base)
break;
// Page-align the probe address, then check both a read and a
// write fault correctly.
addr = (base | delta) & ~((1 << page_shift) - 1);
FAIL_IF(bad_access((char *)addr, false));
FAIL_IF(bad_access((char *)addr, true));
}
}
return 0;
}
// Entry point: run test() under the powerpc selftest harness, which
// handles timeouts and result reporting.
int main(void)
{
	int rc;

	rc = test_harness(test, "bad_accesses");

	return rc;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment