Commit 3a5dc1fa authored by Linus Torvalds

Merge branch 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 microcode loading updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Reload microcode when resuming, including the case when only the
     early loader has been utilized.  (Borislav Petkov)

   - Also, do not load the driver on paravirt guests.  (Boris
     Ostrovsky)"

* 'x86-microcode-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode/intel: Fish out the stashed microcode for the BSP
  x86, microcode: Reload microcode on resume
  x86, microcode: Don't initialize microcode code on paravirt
  x86, microcode, intel: Drop unused parameter
  x86, microcode, AMD: Do not use smp_processor_id() in preemtible context
parents 3100e448 25cdb9c8
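
For orientation before the hunks: the central change rewires the resume path so that a CPU which only got microcode from the early loader still has its stashed patch re-applied after suspend/resume, through the new vendor-dispatching reload_early_microcode() helper, instead of the old X86_64-only load_ucode_ap() fallback. Below is a minimal sketch of mc_bp_resume() as it reads after this merge, pieced together from the hunks that follow; the two local-variable setup lines are assumed context that falls outside the quoted hunk.

/*
 * Sketch of mc_bp_resume() after this merge, assembled from the hunks
 * below; not a verbatim copy of the kernel tree.  The cpu/uci setup
 * lines are assumed, as they are outside the diff context shown here.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid && uci->mc)
		/* late-loaded microcode is cached: re-apply it directly */
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		/* only the early loader ran: re-apply its stash via the
		 * per-vendor reload_ucode_intel()/reload_ucode_amd() paths */
		reload_early_microcode();
}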
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -78,6 +78,7 @@ static inline void __exit exit_amd_microcode(void) {}
 extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
+void reload_early_microcode(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
 static inline void load_ucode_ap(void) {}
@@ -85,6 +86,7 @@ static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
 }
+static inline void reload_early_microcode(void) {}
 #endif

 #endif /* _ASM_X86_MICROCODE_H */
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,

 extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
 extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);

 #define PATCH_MAX_SIZE PAGE_SIZE
 extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -68,10 +68,12 @@ extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 extern void __init load_ucode_amd_bsp(void);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
+void reload_ucode_amd(void);
 #else
 static inline void __init load_ucode_amd_bsp(void) {}
 static inline void load_ucode_amd_ap(void) {}
 static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+void reload_ucode_amd(void) {}
 #endif

 #endif /* _ASM_X86_MICROCODE_AMD_H */
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -68,11 +68,13 @@ extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
 extern int __init save_microcode_in_initrd_intel(void);
+void reload_ucode_intel(void);
 #else
 static inline __init void load_ucode_intel_bsp(void) {}
 static inline void load_ucode_intel_ap(void) {}
 static inline void show_ucode_info_early(void) {}
 static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
+static inline void reload_ucode_intel(void) {}
 #endif

 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -376,7 +376,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 	return UCODE_OK;
 }

-enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
 {
 	enum ucode_state ret;

@@ -390,8 +390,8 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)

 #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
 	/* save BSP's matching patch for early load */
-	if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
-		struct ucode_patch *p = find_patch(smp_processor_id());
+	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
+		struct ucode_patch *p = find_patch(cpu);
 		if (p) {
 			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
 			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -444,7 +444,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 		goto fw_release;
 	}

-	ret = load_microcode_amd(c->x86, fw->data, fw->size);
+	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

  fw_release:
 	release_firmware(fw);
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -389,7 +389,7 @@ int __init save_microcode_in_initrd_amd(void)
 	eax   = cpuid_eax(0x00000001);
 	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

-	ret = load_microcode_amd(eax, container, container_size);
+	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
 	if (ret != UCODE_OK)
 		retval = -EINVAL;

@@ -402,3 +402,21 @@ int __init save_microcode_in_initrd_amd(void)

 	return retval;
 }
+
+void reload_ucode_amd(void)
+{
+	struct microcode_amd *mc;
+	u32 rev, eax;
+
+	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
+
+	mc = (struct microcode_amd *)amd_ucode_patch;
+
+	if (mc && rev < mc->hdr.patch_id) {
+		if (!__apply_microcode_amd(mc)) {
+			ucode_new_rev = mc->hdr.patch_id;
+			pr_info("microcode: reload patch_level=0x%08x\n",
+				ucode_new_rev);
+		}
+	}
+}
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,16 +465,8 @@ static void mc_bp_resume(void)

 	if (uci->valid && uci->mc)
 		microcode_ops->apply_microcode(cpu);
-#ifdef CONFIG_X86_64
 	else if (!uci->mc)
-		/*
-		 * We might resume and not have applied late microcode but still
-		 * have a newer patch stashed from the early loader. We don't
-		 * have it in uci->mc so we have to load it the same way we're
-		 * applying patches early on the APs.
-		 */
-		load_ucode_ap();
-#endif
+		reload_early_microcode();
 }

 static struct syscore_ops mc_syscore_ops = {
@@ -559,7 +551,7 @@ static int __init microcode_init(void)
 	struct cpuinfo_x86 *c = &cpu_data(0);
 	int error;

-	if (dis_ucode_ldr)
+	if (paravirt_enabled() || dis_ucode_ldr)
 		return 0;

 	if (c->x86_vendor == X86_VENDOR_INTEL)
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -176,3 +176,24 @@ int __init save_microcode_in_initrd(void)

 	return 0;
 }
+
+void reload_early_microcode(void)
+{
+	int vendor, x86;
+
+	vendor = x86_vendor();
+	x86 = x86_family();
+
+	switch (vendor) {
+	case X86_VENDOR_INTEL:
+		if (x86 >= 6)
+			reload_ucode_intel();
+		break;
+	case X86_VENDOR_AMD:
+		if (x86 >= 0x10)
+			reload_ucode_amd();
+		break;
+	default:
+		break;
+	}
+}
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -650,8 +650,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
 }
 #endif

-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
-				 struct ucode_cpu_info *uci)
+static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
 	struct microcode_intel *mc_intel;
 	unsigned int val[2];
@@ -680,7 +679,10 @@ static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
 #endif
 	uci->cpu_sig.rev = val[1];

-	print_ucode(uci);
+	if (early)
+		print_ucode(uci);
+	else
+		print_ucode_info(uci, mc_intel->hdr.date);

 	return 0;
 }
@@ -715,12 +717,17 @@ _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
 		      unsigned long initrd_end_early,
 		      struct ucode_cpu_info *uci)
 {
+	enum ucode_state ret;
+
 	collect_cpu_info_early(uci);
 	scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data,
 		       mc_saved_in_initrd, uci);
-	load_microcode(mc_saved_data, mc_saved_in_initrd,
-		       initrd_start_early, uci);
-	apply_microcode_early(mc_saved_data, uci);
+
+	ret = load_microcode(mc_saved_data, mc_saved_in_initrd,
+			     initrd_start_early, uci);
+
+	if (ret == UCODE_OK)
+		apply_microcode_early(uci, true);
 }

 void __init
@@ -749,7 +756,8 @@ load_ucode_intel_bsp(void)
 	initrd_end_early = initrd_start_early + ramdisk_size;

 	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd,
-			      initrd_start_early, initrd_end_early, &uci);
+			      initrd_start_early, initrd_end_early,
+			      &uci);
 #endif
 }

@@ -783,5 +791,23 @@ void load_ucode_intel_ap(void)
 	collect_cpu_info_early(&uci);
 	load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
 		       initrd_start_addr, &uci);
-	apply_microcode_early(mc_saved_data_p, &uci);
+	apply_microcode_early(&uci, true);
 }
+
+void reload_ucode_intel(void)
+{
+	struct ucode_cpu_info uci;
+	enum ucode_state ret;
+
+	if (!mc_saved_data.mc_saved_count)
+		return;
+
+	collect_cpu_info_early(&uci);
+
+	ret = generic_load_microcode_early(mc_saved_data.mc_saved,
+					   mc_saved_data.mc_saved_count, &uci);
+	if (ret != UCODE_OK)
+		return;
+
+	apply_microcode_early(&uci, false);
+}