Commit a25a1d6c authored by Linus Torvalds

Merge branch 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 microcode updates from Ingo Molnar:
 "The main changes are further simplification and unification of the
  code between the AMD and Intel microcode loaders, plus other
  simplifications - by Borislav Petkov"

* 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode/AMD: Remove struct cont_desc.eq_id
  x86/microcode/AMD: Remove AP scanning optimization
  x86/microcode/AMD: Simplify saving from initrd
  x86/microcode/AMD: Unify load_ucode_amd_ap()
  x86/microcode/AMD: Check patch level only on the BSP
  x86/microcode: Remove local vendor variable
  x86/microcode/AMD: Use find_microcode_in_initrd()
  x86/microcode/AMD: Get rid of global this_equiv_id
  x86/microcode: Decrease CPUID use
  x86/microcode/AMD: Rework container parsing
  x86/microcode/AMD: Extend the container struct
  x86/microcode/AMD: Shorten function parameter's name
  x86/microcode/AMD: Clean up find_equiv_id()
  x86/microcode: Convert to bare minimum MSR accessors
  x86/MSR: Carve out bare minimum accessors
parents 280d7a1e f26483ea
@@ -195,7 +195,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
 {
-        wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+        __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
 }
 
 static inline u32 native_apic_msr_read(u32 reg)
......
@@ -7,17 +7,16 @@
 
 #define native_rdmsr(msr, val1, val2)                   \
 do {                                                    \
-        u64 __val = native_read_msr((msr));             \
+        u64 __val = __rdmsr((msr));                     \
         (void)((val1) = (u32)__val);                    \
         (void)((val2) = (u32)(__val >> 32));            \
 } while (0)
 
 #define native_wrmsr(msr, low, high)                    \
-        native_write_msr(msr, low, high)
+        __wrmsr(msr, low, high)
 
 #define native_wrmsrl(msr, val)                         \
-        native_write_msr((msr),                         \
-                         (u32)((u64)(val)),             \
-                         (u32)((u64)(val) >> 32))
+        __wrmsr((msr), (u32)((u64)(val)),               \
+                       (u32)((u64)(val) >> 32))
 
 struct ucode_patch {
......
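Note: native_wrmsrl() above has to split its 64-bit argument into the low/high u32 halves that the WRMSR instruction expects in EAX and EDX. A minimal standalone sketch of just that split, for illustration only (split_msr_value() is a made-up name, not a kernel symbol):

    #include <stdint.h>

    /* Split a 64-bit MSR value into the EAX (low) and EDX (high) halves. */
    static void split_msr_value(uint64_t val, uint32_t *low, uint32_t *high)
    {
            *low  = (uint32_t)val;          /* bits 31:0  -> EAX */
            *high = (uint32_t)(val >> 32);  /* bits 63:32 -> EDX */
    }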
@@ -54,6 +54,4 @@ static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 void reload_ucode_amd(void) {}
 #endif
-
-extern bool check_current_patch_level(u32 *rev, bool early);
 #endif /* _ASM_X86_MICROCODE_AMD_H */
@@ -80,7 +80,14 @@ static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
 static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 #endif
 
-static inline unsigned long long native_read_msr(unsigned int msr)
+/*
+ * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
+ * accessors and should not have any tracing or other functionality piggybacking
+ * on them - those are *purely* for accessing MSRs and nothing more. So don't even
+ * think of extending them - you will be slapped with a stinking trout or a frozen
+ * shark will reach you, wherever you are! You've been warned.
+ */
+static inline unsigned long long notrace __rdmsr(unsigned int msr)
 {
         DECLARE_ARGS(val, low, high);
@@ -88,11 +95,30 @@ static inline unsigned long long native_read_msr(unsigned int msr)
                      "2:\n"
                      _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
                      : EAX_EDX_RET(val, low, high) : "c" (msr));
-        if (msr_tracepoint_active(__tracepoint_read_msr))
-                do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
+
         return EAX_EDX_VAL(val, low, high);
 }
 
+static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+{
+        asm volatile("1: wrmsr\n"
+                     "2:\n"
+                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
+                     : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+        unsigned long long val;
+
+        val = __rdmsr(msr);
+
+        if (msr_tracepoint_active(__tracepoint_read_msr))
+                do_trace_read_msr(msr, val, 0);
+
+        return val;
+}
+
 static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                       int *err)
 {
@@ -114,31 +140,16 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
         return EAX_EDX_VAL(val, low, high);
 }
 
-/* Can be uninlined because referenced by paravirt */
-static inline void notrace
-__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
-{
-        asm volatile("1: wrmsr\n"
-                     "2:\n"
-                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
-                     : : "c" (msr), "a"(low), "d" (high) : "memory");
-}
-
 /* Can be uninlined because referenced by paravirt */
 static inline void notrace
 native_write_msr(unsigned int msr, u32 low, u32 high)
 {
-        __native_write_msr_notrace(msr, low, high);
+        __wrmsr(msr, low, high);
         if (msr_tracepoint_active(__tracepoint_write_msr))
                 do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
-static inline void
-wrmsr_notrace(unsigned int msr, u32 low, u32 high)
-{
-        __native_write_msr_notrace(msr, low, high);
-}
-
 /* Can be uninlined because referenced by paravirt */
 static inline int notrace
 native_write_msr_safe(unsigned int msr, u32 low, u32 high)
......
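Note: the net effect of the msr.h changes is a two-layer scheme: __rdmsr()/__wrmsr() are the untraced leaves that only issue the instruction, and native_read_msr()/native_write_msr() wrap them with the tracepoint calls. A condensed sketch of that wrapping pattern in plain C, with stand-in names (trace_active, do_trace_write() and the bare accessor body are stubs here, not the kernel's API):

    #include <stdint.h>
    #include <stdbool.h>

    static bool trace_active;  /* stands in for msr_tracepoint_active() */

    /* Stub tracepoint; the kernel fires a real trace event instead. */
    static void do_trace_write(uint32_t msr, uint64_t val)
    {
            (void)msr; (void)val;
    }

    /* Bare accessor: no tracing, so tracing code itself may call it safely. */
    static void bare_wrmsr(uint32_t msr, uint32_t low, uint32_t high)
    {
            /* the real __wrmsr() issues WRMSR via inline asm here */
            (void)msr; (void)low; (void)high;
    }

    /* Traced accessor: do the write, then fire the tracepoint if enabled. */
    static void traced_wrmsr(uint32_t msr, uint32_t low, uint32_t high)
    {
            bare_wrmsr(msr, low, high);
            if (trace_active)
                    do_trace_write(msr, ((uint64_t)high << 32) | low);
    }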
This diff is collapsed.
@@ -66,19 +66,50 @@ static DEFINE_MUTEX(microcode_mutex);
 
 struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
 
-/*
- * Operations that are run on a target cpu:
- */
-
 struct cpu_info_ctx {
         struct cpu_signature *cpu_sig;
         int err;
 };
 
+/*
+ * Those patch levels cannot be updated to newer ones and thus should be final.
+ */
+static u32 final_levels[] = {
+        0x01000098,
+        0x0100009f,
+        0x010000af,
+        0, /* T-101 terminator */
+};
+
+/*
+ * Check the current patch level on this CPU.
+ *
+ * Returns:
+ *  - true: if update should stop
+ *  - false: otherwise
+ */
+static bool amd_check_current_patch_level(void)
+{
+        u32 lvl, dummy, i;
+        u32 *levels;
+
+        native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
+
+        if (IS_ENABLED(CONFIG_X86_32))
+                levels = (u32 *)__pa_nodebug(&final_levels);
+        else
+                levels = final_levels;
+
+        for (i = 0; levels[i]; i++) {
+                if (lvl == levels[i])
+                        return true;
+        }
+        return false;
+}
+
 static bool __init check_loader_disabled_bsp(void)
 {
         static const char *__dis_opt_str = "dis_ucode_ldr";
-        u32 a, b, c, d;
 
 #ifdef CONFIG_X86_32
         const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
@@ -94,18 +125,19 @@ static bool __init check_loader_disabled_bsp(void)
         if (!have_cpuid_p())
                 return *res;
 
-        a = 1;
-        c = 0;
-        native_cpuid(&a, &b, &c, &d);
-
         /*
          * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
          * completely accurate as xen pv guests don't see that CPUID bit set but
          * that's good enough as they don't land on the BSP path anyway.
          */
-        if (c & BIT(31))
+        if (native_cpuid_ecx(1) & BIT(31))
                 return *res;
 
+        if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
+                if (amd_check_current_patch_level())
+                        return *res;
+        }
+
         if (cmdline_find_option_bool(cmdline, option) <= 0)
                 *res = false;
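Note: native_cpuid_ecx(1) used above comes from companion single-register CPUID helpers added alongside this series; each runs CPUID for a given leaf with ECX cleared (exactly what the removed open-coded a = 1; c = 0; sequence did) and returns one output register. A hedged sketch of the ECX variant, assuming the kernel's native_cpuid() (the suffix _sketch marks it as illustrative; the in-tree helpers are generated by a macro for all four registers):

    /* Run CPUID leaf 'op' (subleaf 0) and return only ECX. */
    static inline unsigned int native_cpuid_ecx_sketch(unsigned int op)
    {
            unsigned int eax = op, ebx, ecx = 0, edx;

            native_cpuid(&eax, &ebx, &ecx, &edx);

            return ecx;
    }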
@@ -133,23 +165,21 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 
 void __init load_ucode_bsp(void)
 {
-        int vendor;
-        unsigned int family;
+        unsigned int cpuid_1_eax;
 
         if (check_loader_disabled_bsp())
                 return;
 
-        vendor = x86_cpuid_vendor();
-        family = x86_cpuid_family();
+        cpuid_1_eax = native_cpuid_eax(1);
 
-        switch (vendor) {
+        switch (x86_cpuid_vendor()) {
         case X86_VENDOR_INTEL:
-                if (family >= 6)
+                if (x86_family(cpuid_1_eax) >= 6)
                         load_ucode_intel_bsp();
                 break;
         case X86_VENDOR_AMD:
-                if (family >= 0x10)
-                        load_ucode_amd_bsp(family);
+                if (x86_family(cpuid_1_eax) >= 0x10)
+                        load_ucode_amd_bsp(cpuid_1_eax);
                 break;
         default:
                 break;
@@ -167,22 +197,21 @@ static bool check_loader_disabled_ap(void)
 
 void load_ucode_ap(void)
 {
-        int vendor, family;
+        unsigned int cpuid_1_eax;
 
         if (check_loader_disabled_ap())
                 return;
 
-        vendor = x86_cpuid_vendor();
-        family = x86_cpuid_family();
+        cpuid_1_eax = native_cpuid_eax(1);
 
-        switch (vendor) {
+        switch (x86_cpuid_vendor()) {
         case X86_VENDOR_INTEL:
-                if (family >= 6)
+                if (x86_family(cpuid_1_eax) >= 6)
                         load_ucode_intel_ap();
                 break;
         case X86_VENDOR_AMD:
-                if (family >= 0x10)
-                        load_ucode_amd_ap(family);
+                if (x86_family(cpuid_1_eax) >= 0x10)
+                        load_ucode_amd_ap(cpuid_1_eax);
                 break;
         default:
                 break;
@@ -201,7 +230,7 @@ static int __init save_microcode_in_initrd(void)
                 break;
         case X86_VENDOR_AMD:
                 if (c->x86 >= 0x10)
-                        ret = save_microcode_in_initrd_amd(c->x86);
+                        return save_microcode_in_initrd_amd(cpuid_eax(1));
                 break;
         default:
                 break;
......
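Note: the switch from x86_cpuid_family() to x86_family(cpuid_1_eax) in the core.c hunks above works because the family is computed purely from CPUID(1).EAX, so reading that register once and passing it around avoids repeated CPUID invocations. The kernel's x86_family() helper is essentially the following (base family lives in bits 11:8; when it reads 0xf, the extended family in bits 27:20 is added on top, which is how AMD families >= 0x10 are reached):

    /* Extract the CPU family from the CPUID(1).EAX signature. */
    static inline unsigned int x86_family(unsigned int sig)
    {
            unsigned int x86 = (sig >> 8) & 0xf;

            if (x86 == 0xf)
                    x86 += (sig >> 20) & 0xff;

            return x86;
    }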