Commit 7a9c2dd0 authored by Chen Yu, committed by Ingo Molnar

x86/pm: Introduce quirk framework to save/restore extra MSR registers around suspend/resume

A bug was reported that on certain Broadwell platforms, after
resuming from S3, the CPU is running at an anomalously low
speed.

It turns out that the BIOS modifies the value of the
THERM_CONTROL register during S3, changing it from 0 to 0x10:
this enables clock modulation (bit 4) but leaves the CPU Duty
Cycle (bits 1:3) undefined, which causes the problem.

Here is a simple scenario to reproduce the issue (a user-space
sketch for reading the MSR follows the list):

 1. Boot up the system
 2. Read MSR 0x19a; it should be 0
 3. Put the system into sleep, then wake it up
 4. Read MSR 0x19a again; it now shows 0x10, while it should still be 0
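
The MSR can be checked from user space by reading /dev/cpu/0/msr at
offset 0x19a (this requires root and the msr kernel module). The
snippet below is only an illustrative sketch of that check and is not
part of the patch; the bit decoding follows the description above:

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      uint64_t val;
      /* MSR 0x19a is THERM_CONTROL (clock modulation). */
      int fd = open("/dev/cpu/0/msr", O_RDONLY);

      if (fd < 0 || pread(fd, &val, sizeof(val), 0x19a) != sizeof(val)) {
          perror("read MSR 0x19a");
          return 1;
      }
      printf("MSR 0x19a = 0x%llx: clock modulation %s, duty cycle bits 1:3 = %llu\n",
             (unsigned long long)val,
             (val & 0x10) ? "enabled" : "disabled",
             (unsigned long long)((val >> 1) & 0x7));
      close(fd);
      return 0;
  }

Running it once before suspend and again after resume makes the
unexpected 0x10 value easy to spot.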

Although some BIOSen want to change the CPU Duty Cycle during
S3, in our case we don't want the BIOS to make any modification.

Fix this issue by introducing a more generic x86 framework to
save/restore specified MSR registers (THERM_CONTROL in this case)
across suspend/resume. This allows us to fix similar bugs in a much
simpler way in the future.

When the kernel wants to protect certain MSRs across suspend,
we simply add a quirk entry to msr_save_dmi_table and customize
the MSR registers inside the quirk callback, for example:

  u32 msr_id_need_to_save[] = {MSR_ID0, MSR_ID1, MSR_ID2...};

and the quirk mechanism ensures that, once resumed from suspend,
the MSRs indicated by these IDs will be restored to their
original, pre-suspend values (a sketch of such a quirk entry
follows).
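
As an illustration, a quirk for another affected platform would follow
the same pattern as the Broadwell entry in the diff below. Note that
msr_initialize_foo, foo_msr_id, the "EXAMPLE" DMI strings and the
particular MSR list here are hypothetical placeholders, not part of
the patch:

  static int msr_initialize_foo(const struct dmi_system_id *d)
  {
      /* Hypothetical set of MSRs that this platform's BIOS clobbers. */
      u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL, MSR_IA32_MISC_ENABLE };

      pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
              d->ident);
      return msr_init_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
  }

  static struct dmi_system_id msr_save_dmi_table[] = {
      /* ... existing Broadwell entry from this patch ... */
      {
          .callback = msr_initialize_foo,
          .ident = "EXAMPLE PLATFORM",
          .matches = {
              DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE-BOARD"),
          },
      },
      {}
  };

dmi_check_system(msr_save_dmi_table) is invoked from a device_initcall
(pm_check_save_msr() below), so the callback and the MSR bookkeeping
set up by msr_init_context() only run on machines whose DMI strings
match.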

Since both 64-bit and 32-bit kernels are affected, this patch
covers the common 64/32-bit suspend/resume code path. And
because the MSRs specified by the user might not be available or
readable in every situation, we use rdmsrl_safe() to safely save
these MSRs.
Reported-and-tested-by: Marcin Kaszewski <marcin.kaszewski@intel.com>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@suse.de
Cc: len.brown@intel.com
Cc: linux@horizon.com
Cc: luto@kernel.org
Cc: rjw@rjwysocki.net
Link: http://lkml.kernel.org/r/c9abdcbc173dd2f57e8990e304376f19287e92ba.1448382971.git.yu.c.chen@intel.com
[ More edits to the naming of data structures. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6ffeba96
@@ -32,6 +32,16 @@ struct msr_regs_info {
        int err;
};

struct saved_msr {
        bool valid;
        struct msr_info info;
};

struct saved_msrs {
        unsigned int num;
        struct saved_msr *array;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
        unsigned long low, high;
...
@@ -15,6 +15,7 @@ struct saved_context {
        unsigned long cr0, cr2, cr3, cr4;
        u64 misc_enable;
        bool misc_enable_saved;
        struct saved_msrs saved_msrs;
        struct desc_ptr gdt_desc;
        struct desc_ptr idt;
        u16 ldt;
...
@@ -24,6 +24,7 @@ struct saved_context {
        unsigned long cr0, cr2, cr3, cr4, cr8;
        u64 misc_enable;
        bool misc_enable_saved;
        struct saved_msrs saved_msrs;
        unsigned long efer;
        u16 gdt_pad;            /* Unused */
        struct desc_ptr gdt_desc;
...
@@ -23,6 +23,7 @@
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -32,6 +33,29 @@ __visible unsigned long saved_context_eflags;
#endif

struct saved_context saved_context;
static void msr_save_context(struct saved_context *ctxt)
{
        struct saved_msr *msr = ctxt->saved_msrs.array;
        struct saved_msr *end = msr + ctxt->saved_msrs.num;

        while (msr < end) {
                msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
                msr++;
        }
}

static void msr_restore_context(struct saved_context *ctxt)
{
        struct saved_msr *msr = ctxt->saved_msrs.array;
        struct saved_msr *end = msr + ctxt->saved_msrs.num;

        while (msr < end) {
                if (msr->valid)
                        wrmsrl(msr->info.msr_no, msr->info.reg.q);
                msr++;
        }
}
/**
 * __save_processor_state - save CPU registers before creating a
 *      hibernation image and before restoring the memory state from it
@@ -111,6 +135,7 @@ static void __save_processor_state(struct saved_context *ctxt)
#endif
        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
                                               &ctxt->misc_enable);
        msr_save_context(ctxt);
}
/* Needed by apm.c */
@@ -229,6 +254,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        x86_platform.restore_sched_clock_state();
        mtrr_bp_restore();
        perf_restore_debug_store();
        msr_restore_context(ctxt);
}

/* Needed by apm.c */
@@ -320,3 +346,69 @@ static int __init bsp_pm_check_init(void)
}
core_initcall(bsp_pm_check_init);
static int msr_init_context(const u32 *msr_id, const int total_num)
{
        int i = 0;
        struct saved_msr *msr_array;

        if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
                pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
                return -EINVAL;
        }

        msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
        if (!msr_array) {
                pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
                return -ENOMEM;
        }

        for (i = 0; i < total_num; i++) {
                msr_array[i].info.msr_no = msr_id[i];
                msr_array[i].valid = false;
                msr_array[i].info.reg.q = 0;
        }
        saved_context.saved_msrs.num = total_num;
        saved_context.saved_msrs.array = msr_array;

        return 0;
}
/*
 * The following section is a quirk framework for problematic BIOSen:
 * sometimes MSRs are modified by the BIOSen after suspend to RAM,
 * which might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
        /* Add any extra MSR ids into this array. */
        u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

        pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
        return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
static struct dmi_system_id msr_save_dmi_table[] = {
        {
                .callback = msr_initialize_bdw,
                .ident = "BROADWELL BDX_EP",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
                        DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
                },
        },
        {}
};
static int pm_check_save_msr(void)
{
        dmi_check_system(msr_save_dmi_table);

        return 0;
}

device_initcall(pm_check_save_msr);