Commit ae24ffe5 authored by Brian Gerst, committed by Ingo Molnar

x86, 64-bit: Move K8 B step iret fixup to fault entry asm

Move the handling of truncated %rip from an iret fault to the fault
entry path.

This allows x86-64 to use the standard search_extable() function.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jan Beulich <jbeulich@novell.com>
LKML-Reference: <1255357103-5418-1-git-send-email-brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f3834b9e
...@@ -570,7 +570,6 @@ extern struct movsl_mask { ...@@ -570,7 +570,6 @@ extern struct movsl_mask {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
# include "uaccess_32.h" # include "uaccess_32.h"
#else #else
# define ARCH_HAS_SEARCH_EXTABLE
# include "uaccess_64.h" # include "uaccess_64.h"
#endif #endif
......
...@@ -1491,12 +1491,17 @@ error_kernelspace: ...@@ -1491,12 +1491,17 @@ error_kernelspace:
leaq irq_return(%rip),%rcx leaq irq_return(%rip),%rcx
cmpq %rcx,RIP+8(%rsp) cmpq %rcx,RIP+8(%rsp)
je error_swapgs je error_swapgs
movl %ecx,%ecx /* zero extend */ movl %ecx,%eax /* zero extend */
cmpq %rcx,RIP+8(%rsp) cmpq %rax,RIP+8(%rsp)
je error_swapgs je bstep_iret
cmpq $gs_change,RIP+8(%rsp) cmpq $gs_change,RIP+8(%rsp)
je error_swapgs je error_swapgs
jmp error_sti jmp error_sti
bstep_iret:
/* Fix truncated RIP */
movq %rcx,RIP+8(%rsp)
je error_swapgs
END(error_entry) END(error_entry)
......
...@@ -35,34 +35,3 @@ int fixup_exception(struct pt_regs *regs) ...@@ -35,34 +35,3 @@ int fixup_exception(struct pt_regs *regs)
return 0; return 0;
} }
#ifdef CONFIG_X86_64
/*
 * X86_64-specific search_extable(): works around the B stepping K8
 * erratum in which a faulting iret reports a truncated (zero-extended)
 * %rip.  A value with a clear upper half is widened back into the
 * kernel address range before the exception table is searched.
 *
 * Returns the matching entry, or NULL if @value has no fixup.
 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	/* B stepping K8 bug: restore the lost upper 32 address bits */
	if ((value >> 32) == 0)
		value |= 0xffffffffUL << 32;

	/* Plain binary search over the sorted table */
	while (first <= last) {
		const struct exception_table_entry *probe =
			first + (last - first) / 2;
		long delta = probe->insn - value;

		if (delta == 0)
			return probe;
		if (delta < 0)
			first = probe + 1;
		else
			last = probe - 1;
	}
	return NULL;
}
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment