Commit 058dc498 authored by Borislav Petkov's avatar Borislav Petkov Committed by Ingo Molnar

x86/microcode: Export the microcode cache linked list

It will be used by both drivers so move it to core.c.

No functionality change.
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20161025095522.11964-6-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f5bdfefb
...@@ -20,6 +20,15 @@ do { \ ...@@ -20,6 +20,15 @@ do { \
(u32)((u64)(val)), \ (u32)((u64)(val)), \
(u32)((u64)(val) >> 32)) (u32)((u64)(val) >> 32))
/*
 * One cached microcode patch. Instances are linked on the global
 * microcode_cache list (via plist) so a saved patch can be looked up and
 * re-applied later, e.g. on resume from suspend.
 */
struct ucode_patch {
struct list_head plist;		/* node on the global microcode_cache list */
void *data; /* Intel uses only this one */
u32 patch_id;			/* patch revision; used by the AMD driver */
u16 equiv_cpu;			/* AMD equivalence ID the patch applies to */
};
/* Shared patch cache; defined in core.c, used by both vendor drivers. */
extern struct list_head microcode_cache;
struct cpu_signature { struct cpu_signature {
unsigned int sig; unsigned int sig;
unsigned int pf; unsigned int pf;
......
...@@ -39,15 +39,6 @@ ...@@ -39,15 +39,6 @@
static struct equiv_cpu_entry *equiv_cpu_table; static struct equiv_cpu_entry *equiv_cpu_table;
/*
 * NOTE(review): this AMD-local copy of struct ucode_patch and the private
 * pcache list head are the ones being removed by this commit — the shared
 * definition now lives in microcode.h and the list (microcode_cache) is
 * defined in core.c so both vendor drivers can use it.
 */
struct ucode_patch {
struct list_head plist;		/* node on the (formerly private) pcache list */
void *data;
u32 patch_id;
u16 equiv_cpu;
};
/* Driver-private cache of saved patches, replaced by microcode_cache. */
static LIST_HEAD(pcache);
/* /*
* This points to the current valid container of microcode patches which we will * This points to the current valid container of microcode patches which we will
* save from the initrd before jettisoning its contents. * save from the initrd before jettisoning its contents.
...@@ -312,9 +303,9 @@ void __init load_ucode_amd_bsp(unsigned int family) ...@@ -312,9 +303,9 @@ void __init load_ucode_amd_bsp(unsigned int family)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/* /*
* On 32-bit, since AP's early load occurs before paging is turned on, we * On 32-bit, since AP's early load occurs before paging is turned on, we
* cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
* cold boot, AP will apply_ucode_in_initrd() just like the BSP. During * So during cold boot, AP will apply_ucode_in_initrd() just like the BSP.
* save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch, * In save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
* which is used upon resume from suspend. * which is used upon resume from suspend.
*/ */
void load_ucode_amd_ap(void) void load_ucode_amd_ap(void)
...@@ -508,7 +499,7 @@ static struct ucode_patch *cache_find_patch(u16 equiv_cpu) ...@@ -508,7 +499,7 @@ static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{ {
struct ucode_patch *p; struct ucode_patch *p;
list_for_each_entry(p, &pcache, plist) list_for_each_entry(p, &microcode_cache, plist)
if (p->equiv_cpu == equiv_cpu) if (p->equiv_cpu == equiv_cpu)
return p; return p;
return NULL; return NULL;
...@@ -518,7 +509,7 @@ static void update_cache(struct ucode_patch *new_patch) ...@@ -518,7 +509,7 @@ static void update_cache(struct ucode_patch *new_patch)
{ {
struct ucode_patch *p; struct ucode_patch *p;
list_for_each_entry(p, &pcache, plist) { list_for_each_entry(p, &microcode_cache, plist) {
if (p->equiv_cpu == new_patch->equiv_cpu) { if (p->equiv_cpu == new_patch->equiv_cpu) {
if (p->patch_id >= new_patch->patch_id) if (p->patch_id >= new_patch->patch_id)
/* we already have the latest patch */ /* we already have the latest patch */
...@@ -531,14 +522,14 @@ static void update_cache(struct ucode_patch *new_patch) ...@@ -531,14 +522,14 @@ static void update_cache(struct ucode_patch *new_patch)
} }
} }
/* no patch found, add it */ /* no patch found, add it */
list_add_tail(&new_patch->plist, &pcache); list_add_tail(&new_patch->plist, &microcode_cache);
} }
static void free_cache(void) static void free_cache(void)
{ {
struct ucode_patch *p, *tmp; struct ucode_patch *p, *tmp;
list_for_each_entry_safe(p, tmp, &pcache, plist) { list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
__list_del(p->plist.prev, p->plist.next); __list_del(p->plist.prev, p->plist.next);
kfree(p->data); kfree(p->data);
kfree(p); kfree(p);
......
...@@ -45,6 +45,8 @@ ...@@ -45,6 +45,8 @@
static struct microcode_ops *microcode_ops; static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr; static bool dis_ucode_ldr;
LIST_HEAD(microcode_cache);
/* /*
* Synchronization. * Synchronization.
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment