Commit 59e97e4d authored by Andy Lutomirski, committed by H. Peter Anvin

x86: Make alternative instruction pointers relative

This saves a few bytes on x86-64 and means that future patches can
apply alternatives to unrelocated code.
Signed-off-by: Andy Lutomirski <luto@mit.edu>
Link: http://lkml.kernel.org/r/ff64a6b9a1a3860ca4a7b8b6dc7b4754f9491cd7.1310563276.git.luto@mit.edu
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent c9712944
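
The whole change hinges on one encoding trick: instead of an absolute 64-bit pointer, each .altinstructions field stores a signed 32-bit offset from the field's own address, and the consumer adds that address back to recover the target. Below is a minimal user-space sketch of the idea (standalone C, not kernel code; the rel_entry type and helper names are made up for illustration):

/* Self-relative pointer sketch: a 32-bit field holds "target - &field". */
#include <stdint.h>
#include <stdio.h>

struct rel_entry {
	int32_t target_offset;		/* target address minus &target_offset */
};

static char payload[] = "bytes the entry points at";
static struct rel_entry entry;		/* static, so it sits near payload */

static void rel_set(struct rel_entry *e, void *target)
{
	e->target_offset = (int32_t)((uint8_t *)target - (uint8_t *)&e->target_offset);
}

static void *rel_get(struct rel_entry *e)
{
	/* Same recovery as: instr = (u8 *)&a->instr_offset + a->instr_offset */
	return (uint8_t *)&e->target_offset + e->target_offset;
}

int main(void)
{
	rel_set(&entry, payload);
	printf("recovered %p, expected %p\n", rel_get(&entry), (void *)payload);
	return 0;
}

A 32-bit offset is enough here because the table and the code it points at live in the same kernel image, comfortably within ±2 GiB of each other, and because the offset is position-independent it stays correct before the code has been relocated, which is what the commit message is getting at.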

@@ -17,8 +17,8 @@
 .macro altinstruction_entry orig alt feature orig_len alt_len
 	.align 8
-	.quad \orig
-	.quad \alt
+	.long \orig - .
+	.long \alt - .
 	.word \feature
 	.byte \orig_len
 	.byte \alt_len

@@ -43,8 +43,8 @@
 #endif
 struct alt_instr {
-	u8 *instr;		/* original instruction */
-	u8 *replacement;
+	s32 instr_offset;	/* original instruction */
+	s32 repl_offset;	/* offset to replacement instruction */
 	u16 cpuid;		/* cpuid bit set for replacement */
 	u8 instrlen;		/* length of original instruction */
 	u8 replacementlen;	/* length of new instruction, <= instrlen */

@@ -84,8 +84,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	"661:\n\t" oldinstr "\n662:\n"				\
 	".section .altinstructions,\"a\"\n"			\
 	_ASM_ALIGN "\n"						\
-	_ASM_PTR "661b\n"			/* label           */	\
-	_ASM_PTR "663f\n"			/* new instruction */	\
+	" .long 661b - .\n"			/* label           */	\
+	" .long 663f - .\n"			/* new instruction */	\
 	" .word " __stringify(feature) "\n"	/* feature bit     */	\
 	" .byte 662b-661b\n"			/* sourcelen       */	\
 	" .byte 664f-663f\n"			/* replacementlen  */	\

@@ -331,8 +331,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 		"2:\n"
 		".section .altinstructions,\"a\"\n"
 		_ASM_ALIGN "\n"
-		_ASM_PTR "1b\n"
-		_ASM_PTR "0\n"			/* no replacement */
+		" .long 1b - .\n"
+		" .long 0\n"			/* no replacement */
 		" .word %P0\n"			/* feature bit */
 		" .byte 2b - 1b\n"		/* source len */
 		" .byte 0\n"			/* replacement len */

@@ -349,8 +349,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 		"2:\n"
 		".section .altinstructions,\"a\"\n"
 		_ASM_ALIGN "\n"
-		_ASM_PTR "1b\n"
-		_ASM_PTR "3f\n"
+		" .long 1b - .\n"
+		" .long 3f - .\n"
 		" .word %P1\n"			/* feature bit */
 		" .byte 2b - 1b\n"		/* source len */
 		" .byte 4f - 3f\n"		/* replacement len */

@@ -263,6 +263,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 					  struct alt_instr *end)
 {
 	struct alt_instr *a;
+	u8 *instr, *replacement;
 	u8 insnbuf[MAX_PATCH_LEN];
 	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);

@@ -276,25 +277,29 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 	 * order.
 	 */
 	for (a = start; a < end; a++) {
-		u8 *instr = a->instr;
+		instr = (u8 *)&a->instr_offset + a->instr_offset;
+		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 		BUG_ON(a->replacementlen > a->instrlen);
 		BUG_ON(a->instrlen > sizeof(insnbuf));
 		BUG_ON(a->cpuid >= NCAPINTS*32);
 		if (!boot_cpu_has(a->cpuid))
 			continue;
+		memcpy(insnbuf, replacement, a->replacementlen);
+		/* 0xe8 is a relative jump; fix the offset. */
+		if (*insnbuf == 0xe8 && a->replacementlen == 5)
+			*(s32 *)(insnbuf + 1) += replacement - instr;
+		add_nops(insnbuf + a->replacementlen,
+			 a->instrlen - a->replacementlen);
 #ifdef CONFIG_X86_64
 		/* vsyscall code is not mapped yet. resolve it manually. */
 		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
 			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
+			DPRINTK("%s: vsyscall fixup: %p => %p\n",
+				__func__, a->instr, instr);
 		}
 #endif
-		memcpy(insnbuf, a->replacement, a->replacementlen);
-		if (*insnbuf == 0xe8 && a->replacementlen == 5)
-			*(s32 *)(insnbuf + 1) += a->replacement - a->instr;
-		add_nops(insnbuf + a->replacementlen,
-			 a->instrlen - a->replacementlen);
 		text_poke_early(instr, insnbuf, a->instrlen);
 	}
 }
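
One subtlety in the loop above: a replacement that begins with a 5-byte rel32 call (opcode 0xe8) encodes its target relative to the call's own location, so once the bytes are copied from the replacement site to the patch site the stored displacement has to be adjusted by (replacement - instr). A small arithmetic sketch of that fixup (plain C with made-up addresses, not kernel code):

/* rel32 call relocation sketch: mirrors the 0xe8 fixup in apply_alternatives(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int64_t target = 0x1000;	/* hypothetical call target */
	int64_t src    = 0x2000;	/* where the replacement bytes originally live */
	int64_t dst    = 0x3000;	/* where they get patched in (the original site) */

	uint8_t insn[5] = { 0xe8, 0, 0, 0, 0 };

	/* A rel32 call stores "target - (call address + 5)". */
	int32_t disp = (int32_t)(target - (src + 5));
	memcpy(insn + 1, &disp, sizeof(disp));

	/* Moving the instruction from src to dst: add (src - dst), i.e.
	 * the "+= replacement - instr" adjustment from the patch. */
	disp += (int32_t)(src - dst);
	memcpy(insn + 1, &disp, sizeof(disp));

	printf("resolves to %#llx, expected %#llx\n",
	       (unsigned long long)(dst + 5 + disp), (unsigned long long)target);
	return 0;
}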

@@ -2,6 +2,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
 	ALIGN
 copy_page_c:

@@ -110,10 +111,6 @@ ENDPROC(copy_page)
 2:
 	.previous
 	.section .altinstructions,"a"
-	.align 8
-	.quad copy_page
-	.quad 1b
-	.word X86_FEATURE_REP_GOOD
-	.byte .Lcopy_page_end - copy_page
-	.byte 2b - 1b
+	altinstruction_entry copy_page, 1b, X86_FEATURE_REP_GOOD,	\
+		.Lcopy_page_end-copy_page, 2b-1b
 	.previous

@@ -9,6 +9,7 @@
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 #undef memmove

@@ -214,11 +215,9 @@ ENTRY(memmove)
 	.previous
 	.section .altinstructions,"a"
-	.align 8
-	.quad .Lmemmove_begin_forward
-	.quad .Lmemmove_begin_forward_efs
-	.word X86_FEATURE_ERMS
-	.byte .Lmemmove_end_forward-.Lmemmove_begin_forward
-	.byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
+	altinstruction_entry .Lmemmove_begin_forward,			\
+		.Lmemmove_begin_forward_efs,X86_FEATURE_ERMS,		\
+		.Lmemmove_end_forward-.Lmemmove_begin_forward,		\
+		.Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
 	.previous
 ENDPROC(memmove)