Commit f796c758 authored by Borislav Petkov (AMD)

x86/alternatives: Use a temporary buffer when optimizing NOPs

Instead of optimizing NOPs in-place, use a temporary buffer like the
usual alternatives patching flow does. This obviates the need to grab
locks when patching, see

  6778977590da ("x86/alternatives: Disable interrupts and sync when optimizing NOPs in place")

While at it, add nomenclature definitions clarifying and simplifying the
naming of function-local variables in the alternatives code.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240130105941.19707-2-bp@alien8.de
parent ee896208
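
The core of the change is visible in the apply_alternatives() hunk below: the original instruction bytes are first copied into insn_buff, the NOPs are rewritten in that buffer, and the finished buffer is then text-poked back over the original instructions in one step. A rough user-space sketch of that copy/patch/install pattern (not kernel code; scratch_patch_nops, optimize_nops_in_buffer and MAX_PATCH_LEN are made-up names for illustration):

/*
 * User-space illustration only: the kernel uses insn_buff, optimize_nops()
 * and text_poke_early() as in the hunks below.
 */
#include <stdint.h>
#include <string.h>

#define MAX_PATCH_LEN 64	/* illustrative scratch-buffer size */

/* Stand-in for rewriting the NOPs inside the temporary buffer. */
static void optimize_nops_in_buffer(uint8_t *buf, size_t len)
{
	memset(buf, 0x90, len);		/* pretend everything becomes 1-byte NOPs */
}

static void scratch_patch_nops(uint8_t *text, size_t len)
{
	uint8_t buf[MAX_PATCH_LEN];

	if (len > sizeof(buf))
		return;

	memcpy(buf, text, len);			/* work on a private copy ...       */
	optimize_nops_in_buffer(buf, len);	/* ... patch it at leisure ...      */
	memcpy(text, buf, len);			/* ... and install it in one write  */
}
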
@@ -15,7 +15,7 @@
 
 extern void text_poke_early(void *addr, const void *opcode, size_t len);
 
-extern void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len);
+extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);
 
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
@@ -124,6 +124,20 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
 #endif
 };
 
+/*
+ * Nomenclature for variable names to simplify and clarify this code and ease
+ * any potential staring at it:
+ *
+ * @instr: source address of the original instructions in the kernel text as
+ *	   generated by the compiler.
+ *
+ * @buf: temporary buffer on which the patching operates. This buffer is
+ *	 eventually text-poked into the kernel image.
+ *
+ * @replacement/@repl: pointer to the opcodes which are replacing @instr, located
+ *		       in the .altinstr_replacement section.
+ */
+
 /*
  * Fill the buffer with a single effective instruction of size @len.
  *
@@ -133,28 +147,28 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
  * each single-byte NOPs). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
  * *jump* over instead of executing long and daft NOPs.
  */
-static void add_nop(u8 *instr, unsigned int len)
+static void add_nop(u8 *buf, unsigned int len)
 {
-	u8 *target = instr + len;
+	u8 *target = buf + len;
 
 	if (!len)
 		return;
 
 	if (len <= ASM_NOP_MAX) {
-		memcpy(instr, x86_nops[len], len);
+		memcpy(buf, x86_nops[len], len);
 		return;
 	}
 
 	if (len < 128) {
-		__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
-		instr += JMP8_INSN_SIZE;
+		__text_gen_insn(buf, JMP8_INSN_OPCODE, buf, target, JMP8_INSN_SIZE);
+		buf += JMP8_INSN_SIZE;
 	} else {
-		__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
-		instr += JMP32_INSN_SIZE;
+		__text_gen_insn(buf, JMP32_INSN_OPCODE, buf, target, JMP32_INSN_SIZE);
+		buf += JMP32_INSN_SIZE;
 	}
 
-	for (;instr < target; instr++)
-		*instr = INT3_INSN_OPCODE;
+	for (;buf < target; buf++)
+		*buf = INT3_INSN_OPCODE;
 }
 
 extern s32 __retpoline_sites[], __retpoline_sites_end[];
@@ -187,12 +201,12 @@ static bool insn_is_nop(struct insn *insn)
  * Find the offset of the first non-NOP instruction starting at @offset
  * but no further than @len.
  */
-static int skip_nops(u8 *instr, int offset, int len)
+static int skip_nops(u8 *buf, int offset, int len)
 {
 	struct insn insn;
 
 	for (; offset < len; offset += insn.length) {
-		if (insn_decode_kernel(&insn, &instr[offset]))
+		if (insn_decode_kernel(&insn, &buf[offset]))
 			break;
 
 		if (!insn_is_nop(&insn))
@@ -207,7 +221,7 @@ static int skip_nops(u8 *instr, int offset, int len)
  * to the end of the NOP sequence into a single NOP.
  */
 static bool
-__optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target)
+__optimize_nops(const u8 * const instr, u8 *buf, size_t len, struct insn *insn, int *next, int *prev, int *target)
 {
 	int i = *next - insn->length;
@@ -222,12 +236,12 @@ __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev,
 	if (insn_is_nop(insn)) {
 		int nop = i;
 
-		*next = skip_nops(instr, *next, len);
+		*next = skip_nops(buf, *next, len);
 		if (*target && *next == *target)
 			nop = *prev;
 
-		add_nop(instr + nop, *next - nop);
-		DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
+		add_nop(buf + nop, *next - nop);
+		DUMP_BYTES(ALT, buf, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
 		return true;
 	}
@@ -239,32 +253,22 @@ __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev,
  * "noinline" to cause control flow change and thus invalidate I$ and
  * cause refetch after modification.
  */
-static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
+static void __init_or_module noinline optimize_nops(const u8 * const instr, u8 *buf, size_t len)
 {
 	int prev, target = 0;
 
 	for (int next, i = 0; i < len; i = next) {
 		struct insn insn;
 
-		if (insn_decode_kernel(&insn, &instr[i]))
+		if (insn_decode_kernel(&insn, &buf[i]))
 			return;
 
 		next = i + insn.length;
 
-		__optimize_nops(instr, len, &insn, &next, &prev, &target);
+		__optimize_nops(instr, buf, len, &insn, &next, &prev, &target);
 	}
 }
 
-static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	optimize_nops(instr, len);
-	sync_core();
-	local_irq_restore(flags);
-}
-
 /*
  * In this context, "source" is where the instructions are placed in the
  * section .altinstr_replacement, for example during kernel build by the
@@ -335,11 +339,11 @@ bool need_reloc(unsigned long offset, u8 *src, size_t src_len)
 	return (target < src || target > src + src_len);
 }
 
-void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
+void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len)
 {
 	int prev, target = 0;
 
-	for (int next, i = 0; i < len; i = next) {
+	for (int next, i = 0; i < instrlen; i = next) {
 		struct insn insn;
 
 		if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i])))
@@ -347,7 +351,7 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
 
 		next = i + insn.length;
 
-		if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
+		if (__optimize_nops(instr, buf, instrlen, &insn, &next, &prev, &target))
 			continue;
 
 		switch (insn.opcode.bytes[0]) {
@@ -361,10 +365,10 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
 		case JMP8_INSN_OPCODE:
 		case JMP32_INSN_OPCODE:
 		case CALL_INSN_OPCODE:
-			if (need_reloc(next + insn.immediate.value, src, src_len)) {
+			if (need_reloc(next + insn.immediate.value, repl, repl_len)) {
 				apply_reloc(insn.immediate.nbytes,
 					    buf + i + insn_offset_immediate(&insn),
-					    src - dest);
+					    repl - instr);
 			}
 
 			/*
@@ -372,7 +376,7 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
 			 */
 			if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) {
 				s32 imm = insn.immediate.value;
-				imm += src - dest;
+				imm += repl - instr;
 				imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE;
 				if ((imm >> 31) == (imm >> 7)) {
 					buf[i+0] = JMP8_INSN_OPCODE;
@@ -385,10 +389,10 @@ void apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len)
 		}
 
 		if (insn_rip_relative(&insn)) {
-			if (need_reloc(next + insn.displacement.value, src, src_len)) {
+			if (need_reloc(next + insn.displacement.value, repl, repl_len)) {
 				apply_reloc(insn.displacement.nbytes,
 					    buf + i + insn_offset_displacement(&insn),
-					    src - dest);
+					    repl - instr);
 			}
 		}
 	}
@@ -504,7 +508,9 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		 * patch if feature is *NOT* present.
 		 */
 		if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
-			optimize_nops_inplace(instr, a->instrlen);
+			memcpy(insn_buff, instr, a->instrlen);
+			optimize_nops(instr, insn_buff, a->instrlen);
+			text_poke_early(instr, insn_buff, a->instrlen);
 			continue;
 		}
@@ -526,7 +532,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
 			insn_buff[insn_buff_sz] = 0x90;
 
-		apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
+		apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
 
 		DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
 		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
@@ -761,7 +767,7 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 		len = patch_retpoline(addr, &insn, bytes);
 		if (len == insn.length) {
-			optimize_nops(bytes, len);
+			optimize_nops(addr, bytes, len);
 			DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
 			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
 			text_poke_early(addr, bytes, len);
@@ -185,8 +185,7 @@ static void *patch_dest(void *dest, bool direct)
 	u8 *pad = dest - tsize;
 
 	memcpy(insn_buff, skl_call_thunk_template, tsize);
-	apply_relocation(insn_buff, tsize, pad,
-			 skl_call_thunk_template, tsize);
+	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);
 
 	/* Already patched? */
 	if (!bcmp(pad, insn_buff, tsize))
@@ -308,8 +307,7 @@ static bool is_callthunk(void *addr)
 	pad = (void *)(dest - tmpl_size);
 
 	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-	apply_relocation(insn_buff, tmpl_size, pad,
-			 skl_call_thunk_template, tmpl_size);
+	apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);
 
 	return !bcmp(pad, insn_buff, tmpl_size);
 }
@@ -327,8 +325,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
 		return 0;
 
 	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
-	apply_relocation(insn_buff, tmpl_size, ip,
-			 skl_call_thunk_template, tmpl_size);
+	apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);
 
 	memcpy(*pprog, insn_buff, tmpl_size);
 	*pprog += tmpl_size;