Commit e71bb4ec authored by Borislav Petkov, committed by Thomas Gleixner

x86/microcode/AMD: Unify load_ucode_amd_ap()

Use a single version for both 32-bit and 64-bit by adding a helper which does
the actual container finding and parsing and which can be used on any CPU -
BSP or AP - as sketched below. This streamlines the paths further.
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-14-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f3ad136d
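The change boils down to one shared lookup helper with two thin callers. Here is a condensed, standalone sketch of that shape (not the kernel code itself): find_container() is a hypothetical stub standing in for get_builtin_microcode()/find_microcode_in_initrd(), struct cpio_data is a simplified placeholder, and the apply steps are reduced to comments. Only the calling structure mirrors the diff that follows.

/*
 * Condensed sketch of the structure this commit creates; stand-ins only,
 * not the kernel implementation.
 */
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's struct cpio_data. */
struct cpio_data {
        void   *data;
        size_t  size;
};

/*
 * Hypothetical stub for the builtin/initrd container lookup
 * (get_builtin_microcode()/find_microcode_in_initrd() in the real code).
 */
static bool find_container(unsigned int cpuid_1_eax, struct cpio_data *cp)
{
        (void)cpuid_1_eax;
        cp->data = NULL;
        cp->size = 0;
        return false;
}

/* The new shared helper: locate the container once, hand it to the caller. */
static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
        struct cpio_data cp = { 0 };

        if (!find_container(cpuid_1_eax, &cp))
                return;

        *ret = cp;
}

/* BSP path: find the container via the helper, then apply it. */
void load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
        struct cpio_data cp = { 0 };

        __load_ucode_amd(cpuid_1_eax, &cp);
        if (!(cp.data && cp.size))
                return;

        /* apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, ...) */
}

/* AP path: same helper; caching and equivalence-table handling elided. */
void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
        struct cpio_data cp = { 0 };

        __load_ucode_amd(cpuid_1_eax, &cp);
        if (!(cp.data && cp.size))
                return;

        /* parse equivalence table, compare cached descriptor, apply patch */
}

The payoff of this shape is that BSP and AP no longer duplicate the builtin/initrd probing, and the 32-bit #ifdef fork of load_ucode_amd_ap() collapses into a single function that selects physical vs. virtual addresses at runtime via IS_ENABLED(CONFIG_X86_32), as the diff shows.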
@@ -261,7 +261,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
 #endif
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
 {
         struct ucode_cpu_info *uci;
         struct cpio_data cp;
@@ -281,89 +281,71 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
         if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
                 cp = find_microcode_in_initrd(path, use_pa);
 
-        if (!(cp.data && cp.size))
-                return;
-
         /* Needed in load_microcode_amd() */
         uci->cpu_sig.sig = cpuid_1_eax;
 
-        apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
+        *ret = cp;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
- * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
- * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
- * which is used upon resume from suspend.
- */
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
 {
-        struct microcode_amd *mc;
-        struct cpio_data cp;
+        struct cpio_data cp = { };
 
-        mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-        if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
-                __apply_microcode_amd(mc);
-                return;
-        }
-
-        if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
-                cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
+        __load_ucode_amd(cpuid_1_eax, &cp);
 
         if (!(cp.data && cp.size))
                 return;
 
-        /*
-         * This would set amd_ucode_patch above so that the following APs can
-         * use it directly instead of going down this path again.
-         */
         apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true, NULL);
 }
 
-#else
 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
         struct equiv_cpu_entry *eq;
         struct microcode_amd *mc;
+        struct cont_desc *desc;
         u16 eq_id;
 
+        if (IS_ENABLED(CONFIG_X86_32)) {
+                mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
+                desc = (struct cont_desc *)__pa_nodebug(&cont);
+        } else {
+                mc = (struct microcode_amd *)amd_ucode_patch;
+                desc = &cont;
+        }
+
         /* First AP hasn't cached it yet, go through the blob. */
-        if (!cont.data) {
-                struct cpio_data cp;
+        if (!desc->data) {
+                struct cpio_data cp = { };
 
-                if (cont.size == -1)
+                if (desc->size == -1)
                         return;
 
 reget:
-                if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) {
-                        cp = find_microcode_in_initrd(ucode_path, false);
-
-                        if (!(cp.data && cp.size)) {
-                                /*
-                                 * Mark it so that other APs do not scan again
-                                 * for no real reason and slow down boot
-                                 * needlessly.
-                                 */
-                                cont.size = -1;
-                                return;
-                        }
+                __load_ucode_amd(cpuid_1_eax, &cp);
+                if (!(cp.data && cp.size)) {
+                        /*
+                         * Mark it so that other APs do not scan again for no
+                         * real reason and slow down boot needlessly.
+                         */
+                        desc->size = -1;
+                        return;
                 }
 
-                if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, &cont)) {
-                        cont.data = NULL;
-                        cont.size = -1;
+                if (!apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false, desc)) {
+                        desc->data = NULL;
+                        desc->size = -1;
                         return;
                 }
         }
 
-        eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
+        eq = (struct equiv_cpu_entry *)(desc->data + CONTAINER_HDR_SZ);
 
         eq_id = find_equiv_id(eq, cpuid_1_eax);
         if (!eq_id)
                 return;
 
-        if (eq_id == cont.eq_id) {
+        if (eq_id == desc->eq_id) {
                 u32 rev, dummy;
 
                 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -384,7 +366,6 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
                 goto reget;
         }
 }
-#endif /* CONFIG_X86_32 */
 
 static enum ucode_state
 load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);