Commit 51d0f0cb authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes:

   - early_idt_handlers[] fix that fixes the build with bleeding edge
     tooling

   - build warning fix on GCC 5.1

   - vm86 fix plus self-test to make it harder to break it again"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm/irq: Stop relying on magic JMP behavior for early_idt_handlers
  x86/asm/entry/32, selftests: Add a selftest for kernel entries from VM86 mode
  x86/boot: Add CONFIG_PARAVIRT_SPINLOCKS quirk to arch/x86/boot/compressed/misc.h
  x86/asm/entry/32: Really make user_mode() work correctly for VM86 mode
parents a0e9c6ef 425be567
arch/x86/boot/compressed/misc.h
@@ -2,15 +2,14 @@
 #define BOOT_COMPRESSED_MISC_H
 
 /*
- * we have to be careful, because no indirections are allowed here, and
- * paravirt_ops is a kind of one. As it will only run in baremetal anyway,
- * we just keep it from happening
+ * Special hack: we have to be careful, because no indirections are allowed here,
+ * and paravirt_ops is a kind of one. As it will only run in baremetal anyway,
+ * we just keep it from happening. (This list needs to be extended when new
+ * paravirt and debugging variants are added.)
  */
 #undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_SPINLOCKS
 #undef CONFIG_KASAN
 
 #ifdef CONFIG_X86_32
 #define _ASM_X86_DESC_H 1
 #endif
 
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
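The `#define _ASM_X86_DESC_H 1` line above works the same way as the `#undef`s: it pre-defines desc.h's include guard so that any later inclusion of that header (and the indirection-heavy code in it) compiles to nothing. A minimal, self-contained sketch of the guard-pre-define trick, with a made-up guard macro standing in for _ASM_X86_DESC_H:

        #include <stdio.h>

        /* Pre-define the guard, the way misc.h pre-defines _ASM_X86_DESC_H: */
        #define FAKE_DESC_H 1

        #ifndef FAKE_DESC_H        /* this is what the header's own guard checks */
        int desc_table[16];        /* ...so none of this is ever compiled */
        #endif

        int main(void)
        {
                printf("guard pre-defined: header body skipped\n");
                return 0;
        }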
arch/x86/include/asm/ptrace.h
@@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 static inline int user_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
 #else
 	return !!(regs->cs & 3);
 #endif
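OR-ing in the VM bit and changing `==` to `>=` is what makes vm86 frames count as user mode: in vm86 mode, CS holds a real-mode segment value whose low two RPL bits need not be 3, but EFLAGS.VM is set. A standalone sketch of the logic, with the constants restated from the kernel headers (SEGMENT_RPL_MASK is 0x3, USER_RPL is 3, X86_VM_MASK is the EFLAGS.VM bit, 1 << 17):

        #include <assert.h>
        #include <stdio.h>

        #define SEGMENT_RPL_MASK 0x3         /* low two bits of a selector */
        #define USER_RPL         0x3         /* ring 3 */
        #define X86_VM_MASK      0x00020000  /* EFLAGS.VM */

        static int user_mode_old(unsigned long cs, unsigned long flags)
        {
                return (cs & SEGMENT_RPL_MASK) == USER_RPL;
        }

        static int user_mode_new(unsigned long cs, unsigned long flags)
        {
                return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
        }

        int main(void)
        {
                /* vm86 guest: CS is a real-mode segment (RPL bits may be 0),
                 * but EFLAGS.VM is set -- this is still user mode. */
                unsigned long cs = 0x1000, flags = X86_VM_MASK;

                assert(user_mode_old(cs, flags) == 0);  /* old test: wrong */
                assert(user_mode_new(cs, flags) == 1);  /* new test: right */

                /* ordinary ring-3 code: both tests agree */
                assert(user_mode_old(0x23, 0) && user_mode_new(0x23, 0));
                puts("vm86 frames now count as user mode");
                return 0;
        }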
arch/x86/include/asm/segment.h
@@ -231,11 +231,21 @@
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
 
 #ifdef __KERNEL__
+
+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT.  For simplicity, it's a real array with one entry point
+ * every nine bytes.  That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
 #ifndef __ASSEMBLY__
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 #ifdef CONFIG_TRACING
-# define trace_early_idt_handlers early_idt_handlers
+# define trace_early_idt_handler_array early_idt_handler_array
 #endif
 
 /*
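Because every entry point is exactly EARLY_IDT_HANDLER_SIZE bytes, the early boot code can index the array like any C array, which is what head64.c below now does. A small sketch of the arithmetic (fake_array is a hypothetical stand-in for early_idt_handler_array):

        #include <assert.h>
        #include <stdint.h>

        #define EARLY_IDT_HANDLER_SIZE 9

        /* Per-vector stub byte budget, from the comment above:
         *   push $0       -> 2 bytes, only for vectors without an error code
         *   push $vector  -> 2 bytes
         *   jmp common    -> up to 5 bytes (a shorter jmp is also fine,
         *                    since .fill pads the slot with int3 / 0xcc)
         */
        static const uint8_t *stub_for_vector(const uint8_t *array, int vector)
        {
                return array + vector * EARLY_IDT_HANDLER_SIZE; /* fixed stride */
        }

        int main(void)
        {
                static const uint8_t fake_array[32 * EARLY_IDT_HANDLER_SIZE];
                assert(stub_for_vector(fake_array, 14) == fake_array + 126); /* #PF slot */
                assert(2 + 2 + 5 <= EARLY_IDT_HANDLER_SIZE);
                return 0;
        }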
arch/x86/kernel/head64.c
@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	clear_bss();
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
-		set_intr_gate(i, early_idt_handlers[i]);
+		set_intr_gate(i, early_idt_handler_array[i]);
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	copy_bootdata(__va(real_mode_data));
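On 64-bit, set_intr_gate() expands each of those entry points into a 16-byte interrupt gate. A simplified sketch of the gate it fills in (field layout per the architecture manuals; this struct is an illustration, not the kernel's gate_desc definition):

        #include <stdint.h>
        #include <stdio.h>

        /* Simplified 64-bit interrupt gate (16 bytes). */
        struct idt_gate64 {
                uint16_t offset_low;    /* handler address bits 0..15 */
                uint16_t selector;      /* code segment selector */
                uint8_t  ist;           /* interrupt stack table index (0 = none) */
                uint8_t  type_attr;     /* 0x8E: present, DPL=0, interrupt gate */
                uint16_t offset_mid;    /* address bits 16..31 */
                uint32_t offset_high;   /* address bits 32..63 */
                uint32_t reserved;
        };

        static void set_gate(struct idt_gate64 *g, uint16_t cs, uint64_t handler)
        {
                g->offset_low  = handler & 0xffff;
                g->selector    = cs;
                g->ist         = 0;
                g->type_attr   = 0x8e;
                g->offset_mid  = (handler >> 16) & 0xffff;
                g->offset_high = (uint32_t)(handler >> 32);
                g->reserved    = 0;
        }

        int main(void)
        {
                struct idt_gate64 gate;
                set_gate(&gate, 0x10, 0xffffffff81000000ull); /* made-up address */
                printf("type_attr=%#x low=%#x\n", gate.type_attr, gate.offset_low);
                return 0;
        }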
arch/x86/kernel/head_32.S
@@ -478,21 +478,22 @@ is486:
 	__INIT
 setup_once:
 	/*
-	 * Set up a idt with 256 entries pointing to ignore_int,
-	 * interrupt gates. It doesn't actually load idt - that needs
-	 * to be done on each CPU. Interrupts are enabled elsewhere,
-	 * when we can be relatively sure everything is ok.
+	 * Set up a idt with 256 interrupt gates that push zero if there
+	 * is no error code and then jump to early_idt_handler_common.
+	 * It doesn't actually load the idt - that needs to be done on
+	 * each CPU. Interrupts are enabled elsewhere, when we can be
+	 * relatively sure everything is ok.
 	 */
 
 	movl $idt_table,%edi
-	movl $early_idt_handlers,%eax
+	movl $early_idt_handler_array,%eax
 	movl $NUM_EXCEPTION_VECTORS,%ecx
 1:
 	movl %eax,(%edi)
 	movl %eax,4(%edi)
 	/* interrupt gate, dpl=0, present */
 	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
-	addl $9,%eax
+	addl $EARLY_IDT_HANDLER_SIZE,%eax
 	addl $8,%edi
 	loop 1b
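The three movl's are a compact way to build an 8-byte 32-bit interrupt gate in place: the handler address in %eax is written to both dwords, then the store at offset 2 overwrites the middle four bytes with the selector and the 0x8E attribute byte, leaving the address split across the first and last words. A sketch verifying the trick byte for byte (0x60 is assumed for __KERNEL_CS, its conventional value on 32-bit kernels of this era):

        #include <assert.h>
        #include <stdint.h>
        #include <string.h>

        #define KERNEL_CS 0x60  /* assumed value of __KERNEL_CS */

        int main(void)
        {
                uint32_t handler = 0xc1001234;  /* made-up handler address */
                uint8_t desc[8];

                /* movl %eax,(%edi); movl %eax,4(%edi);
                 * movl $(0x8E000000 + __KERNEL_CS),2(%edi) */
                memcpy(desc + 0, &handler, 4);
                memcpy(desc + 4, &handler, 4);
                uint32_t mid = 0x8E000000 + KERNEL_CS;
                memcpy(desc + 2, &mid, 4);

                /* Compare against the canonical 32-bit interrupt gate encoding:
                 * dword0 = selector:offset_low, dword1 = offset_high:attr:00 */
                uint32_t lo, hi;
                memcpy(&lo, desc + 0, 4);
                memcpy(&hi, desc + 4, 4);
                assert(lo == (((uint32_t)KERNEL_CS << 16) | (handler & 0xffff)));
                assert(hi == ((handler & 0xffff0000) | 0x8e00));
                return 0;
        }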
@@ -524,26 +525,28 @@ setup_once:
 	andl $0,setup_once_ref	/* Once is enough, thanks */
 	ret
 
-ENTRY(early_idt_handlers)
+ENTRY(early_idt_handler_array)
 	# 36(%esp) %eflags
 	# 32(%esp) %cs
 	# 28(%esp) %eip
 	# 24(%rsp) error code
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
-	ASM_NOP2
-	.else
+	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
 	pushl $0		# Dummy error code, to make stack frame uniform
 	.endif
 	pushl $i		# 20(%esp) Vector number
-	jmp early_idt_handler
+	jmp early_idt_handler_common
 	i = i + 1
+	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
-ENDPROC(early_idt_handlers)
+ENDPROC(early_idt_handler_array)
 
-/* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+	/*
+	 * The stack is the hardware frame, an error code or zero, and the
+	 * vector number.
+	 */
 	cld
 	cmpl $2,(%esp)		# X86_TRAP_NMI
@@ -603,7 +606,7 @@ ex_entry:
 is_nmi:
 	addl $8,%esp		/* drop vector number and error code */
 	iret
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
 /* This is the default interrupt "handler" :-) */
 	ALIGN
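The same construction is repeated for 64-bit in head_64.S below. The old stubs relied on the assembler always emitting a fixed-size jmp (the "magic JMP behavior" of the commit title; the old common target was made global precisely "to keep gas from relaxing the jumps"), and newer binutils stopped guaranteeing that. Each slot is now padded to EARLY_IDT_HANDLER_SIZE explicitly with `.fill`/0xcc (int3), and the dead ASM_NOP2 filler is gone. Note that gas's `.ifeq` assembles its body when the expression is zero, so the dummy `push $0` is emitted only for vectors whose exception does not push a hardware error code. A sketch of that per-vector decision (the error-code vector list below is the architectural one, restated here rather than copied from the kernel's EXCEPTION_ERRCODE_MASK):

        #include <stdio.h>

        /* Exception vectors that push a hardware error code
         * (#DF, #TS, #NP, #SS, #GP, #PF, #AC). */
        static const int errcode_vectors[] = { 8, 10, 11, 12, 13, 14, 17 };

        int main(void)
        {
                unsigned int mask = 0;
                for (unsigned int i = 0; i < sizeof(errcode_vectors)/sizeof(int); i++)
                        mask |= 1u << errcode_vectors[i];

                for (int vec = 0; vec < 32; vec++) {
                        /* .ifeq (MASK >> i) & 1  ==>  emit "push $0" when the
                         * bit is clear, so every stub leaves the same stack. */
                        int needs_dummy = !((mask >> vec) & 1);
                        printf("vector %2d: %s\n", vec,
                               needs_dummy ? "push $0; push $vec; jmp common"
                                           : "push $vec; jmp common");
                }
                return 0;
        }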
arch/x86/kernel/head_64.S
@@ -321,26 +321,28 @@ bad_address:
 	jmp bad_address
 
 	__INIT
-	.globl early_idt_handlers
-early_idt_handlers:
+ENTRY(early_idt_handler_array)
 	# 104(%rsp) %rflags
 	# 96(%rsp) %cs
 	# 88(%rsp) %rip
 	# 80(%rsp) error code
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
-	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
-	ASM_NOP2
-	.else
+	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
 	pushq $0		# Dummy error code, to make stack frame uniform
 	.endif
 	pushq $i		# 72(%rsp) Vector number
-	jmp early_idt_handler
+	jmp early_idt_handler_common
 	i = i + 1
+	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
+ENDPROC(early_idt_handler_array)
 
-/* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+	/*
+	 * The stack is the hardware frame, an error code or zero, and the
+	 * vector number.
+	 */
 	cld
 	cmpl $2,(%rsp)		# X86_TRAP_NMI

@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
 is_nmi:
 	addq $16,%rsp		# drop vector number and error code
 	INTERRUPT_RETURN
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
 	__INITDATA
tools/testing/selftests/x86/Makefile
@@ -5,8 +5,10 @@ include ../lib.mk
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
 TARGETS_C_BOTHBITS := sigreturn single_step_syscall
+TARGETS_C_32BIT_ONLY := entry_from_vm86
 
-BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32)
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
 BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall

@@ -32,7 +34,7 @@ all_64: $(BINARIES_64)
 clean:
 	$(RM) $(BINARIES_32) $(BINARIES_64)
 
-$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c
+$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
 	$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 $(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
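The TARGETS_C_32BIT_ONLY/TARGETS_C_32BIT_ALL split keeps entry_from_vm86 out of the 64-bit pattern rule, since the vm86() syscall only exists for 32-bit tasks. After this change, something like `make -C tools/testing/selftests/x86 all_32` from a kernel tree should build entry_from_vm86_32 alongside the existing _32 binaries.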
tools/testing/selftests/x86/entry_from_vm86.c (new file)

/*
 * entry_from_vm86.c - tests kernel entries from vm86 mode
 * Copyright (c) 2014-2015 Andrew Lutomirski
 *
 * This exercises a few paths that need to special-case vm86 mode.
 *
 * GPL v2.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/signal.h>
#include <sys/ucontext.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <err.h>
#include <stddef.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/vm86.h>

static unsigned long load_addr = 0x10000;
static int nerrs = 0;

asm (
	".pushsection .rodata\n\t"
	".type vmcode_bound, @object\n\t"
	"vmcode:\n\t"
	"vmcode_bound:\n\t"
	".code16\n\t"
	"bound %ax, (2048)\n\t"
	"int3\n\t"
	"vmcode_sysenter:\n\t"
	"sysenter\n\t"
	".size vmcode, . - vmcode\n\t"
	"end_vmcode:\n\t"
	".code32\n\t"
	".popsection"
	);

extern unsigned char vmcode[], end_vmcode[];
extern unsigned char vmcode_bound[], vmcode_sysenter[];

static void do_test(struct vm86plus_struct *v86, unsigned long eip,
		    const char *text)
{
	long ret;

	printf("[RUN]\t%s from vm86 mode\n", text);
	v86->regs.eip = eip;
	ret = vm86(VM86_ENTER, v86);

	if (ret == -1 && errno == ENOSYS) {
		printf("[SKIP]\tvm86 not supported\n");
		return;
	}

	if (VM86_TYPE(ret) == VM86_INTx) {
		char trapname[32];
		int trapno = VM86_ARG(ret);
		if (trapno == 13)
			strcpy(trapname, "GP");
		else if (trapno == 5)
			strcpy(trapname, "BR");
		else if (trapno == 14)
			strcpy(trapname, "PF");
		else
			sprintf(trapname, "%d", trapno);

		printf("[OK]\tExited vm86 mode due to #%s\n", trapname);
	} else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
		printf("[OK]\tExited vm86 mode due to unhandled GP fault\n");
	} else {
		printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n",
		       VM86_TYPE(ret), VM86_ARG(ret));
	}
}

int main(void)
{
	struct vm86plus_struct v86;
	unsigned char *addr = mmap((void *)load_addr, 4096,
				   PROT_READ | PROT_WRITE | PROT_EXEC,
				   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (addr != (unsigned char *)load_addr)
		err(1, "mmap");

	memcpy(addr, vmcode, end_vmcode - vmcode);
	addr[2048] = 2;
	addr[2050] = 3;

	memset(&v86, 0, sizeof(v86));

	v86.regs.cs = load_addr / 16;
	v86.regs.ss = load_addr / 16;
	v86.regs.ds = load_addr / 16;
	v86.regs.es = load_addr / 16;

	assert((v86.regs.cs & 3) == 0);	/* Looks like RPL = 0 */

	/* #BR -- should deliver SIG??? */
	do_test(&v86, vmcode_bound - vmcode, "#BR");

	/* SYSENTER -- should cause #GP or #UD depending on CPU */
	do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER");

	return (nerrs == 0 ? 0 : 1);
}
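For reference, VM86_TYPE() and VM86_ARG() unpack the vm86() return value used throughout the test above: the exit type lives in the low byte and the argument (e.g. the interrupt number for a VM86_INTx exit) in the bits above it. A self-contained sketch mirroring the definitions in <asm/vm86.h>, restated here so it compiles standalone:

        #include <stdio.h>

        #define MY_VM86_TYPE(ret) ((ret) & 0xff)
        #define MY_VM86_ARG(ret)  ((ret) >> 8)
        #define MY_VM86_INTx 2    /* guest executed "int $n" */

        int main(void)
        {
                /* e.g. a guest "int3" surfaces as type VM86_INTx with arg 3 */
                long ret = (3 << 8) | MY_VM86_INTx;

                if (MY_VM86_TYPE(ret) == MY_VM86_INTx)
                        printf("int $%ld from vm86 guest\n", MY_VM86_ARG(ret));
                return 0;
        }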